Background

Open-Meteo maintains a historical weather API that allows non-commercial use of the historical weather data hosted on its website.

This file builds on _v001 and _v002 to run exploratory analysis on some historical weather data.

Functions and Libraries

The exploration process uses tidyverse, ranger, several generic custom functions, and several functions specific to Open-Meteo processing. First, tidyverse, ranger, and the generic functions are loaded:

library(tidyverse) # tidyverse functionality is included throughout
## Warning: package 'ggplot2' was built under R version 4.2.3
## Warning: package 'tibble' was built under R version 4.2.3
## Warning: package 'purrr' was built under R version 4.2.3
## Warning: package 'dplyr' was built under R version 4.2.3
## Warning: package 'stringr' was built under R version 4.2.3
## Warning: package 'lubridate' was built under R version 4.2.3
## ── Attaching core tidyverse packages ──────────────────────── tidyverse 2.0.0 ──
## ✔ dplyr     1.1.4     ✔ readr     2.1.4
## ✔ forcats   1.0.0     ✔ stringr   1.5.1
## ✔ ggplot2   3.4.4     ✔ tibble    3.2.1
## ✔ lubridate 1.9.3     ✔ tidyr     1.3.0
## ✔ purrr     1.0.2     
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## ✖ dplyr::filter() masks stats::filter()
## ✖ dplyr::lag()    masks stats::lag()
## ℹ Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
library(ranger) # predict() does not work on ranger objects unless ranger has been called
## Warning: package 'ranger' was built under R version 4.2.3
source("./Generic_Added_Utility_Functions_202105_v001.R") # Basic functions

Next, specific functions written in _v001 are copied:

# Helper function for reading a partial CSV file
partialCSVRead <- function(loc, firstRow=1L, lastRow=+Inf, col_names=TRUE, ...) {
    
    # FUNCTION arguments
    # loc: file location
    # firstRow: first row that is relevant to the partial file read (whether header line or data line)
    # lastRow: last row that is relevant to the partial file read (+Inf means read until last line of file)
    # col_names: the col_names parameter passed to readr::read_csv
    #            TRUE means header=TRUE (get column names from file, read data starting on next line)
    #            FALSE means header=FALSE (auto-generate column names, read data starting on first line)
    #            character vector means use these as column names (read data starting on first line)
    # ...: additional arguments passed to read_csv

    # Read the file and return
    # skip: rows to be skipped are all those prior to firstRow
    # n_max: when col_names is TRUE, one of the lastRow-firstRow+1 relevant rows is the
    #        header, leaving lastRow-firstRow data rows; otherwise every row is a data row
    #        (plain if/else is used instead of ifelse() since isTRUE() is always scalar)
    readr::read_csv(loc, 
                    col_names=col_names,
                    skip=firstRow-1, 
                    n_max=lastRow-firstRow+(if(isTRUE(col_names)) 0 else 1), 
                    ...
                    )
    
}


# Get the break points for gaps in a vector (e.g., 0, 3, 5:8, 20 has run starts 0, 3, 5, 20 and run ends 0, 3, 8, 20)
vecGaps <- function(x, addElements=c(), sortUnique=TRUE) {
    
    # Optionally prepend extra elements, then sort and deduplicate
    if(length(addElements)>0) x <- c(addElements, x)
    if(isTRUE(sortUnique)) x <- unique(sort(x))
    
    # An element starts a run when it has no predecessor or follows a jump greater than 1;
    # it ends a run when it has no successor or precedes a jump greater than 1
    prevVal <- lag(x)
    nextVal <- lead(x)
    isRunStart <- is.na(prevVal) | x-prevVal>1
    isRunEnd <- is.na(nextVal) | nextVal-x>1
    
    # +Inf sentinel on starts lets callers reference starts[k+1] past the final run
    list("starts"=c(x[isRunStart], +Inf), 
         "ends"=x[isRunEnd]
         )
    
}


# Find the break points in a single file
flatFileGaps <- function(loc) {

    # Blank lines (zero characters) mark the breaks between sections of the file;
    # 0 is added so the first section is treated as starting right after "line 0"
    blankLines <- which(stringr::str_length(readLines(loc))==0)
    vecGaps(blankLines, addElements=0)
    
}


# Read all relevant data as CSV with header
readMultiCSV <- function(loc, col_names=TRUE, ...) {

    # Locate the blank-line breaks that separate the CSV sections of the file
    gaps <- flatFileGaps(loc)
    
    # Each section runs from just after a gap end to just before the next gap start
    # (the +Inf sentinel in gaps$starts covers the final section)
    readOneSection <- function(idx) {
        partialCSVRead(loc, 
                       firstRow=gaps$ends[idx]+1, 
                       lastRow=gaps$starts[idx+1]-1, 
                       col_names=col_names, 
                       ...
                       )
    }
    
    # Return one tibble per section, as a list
    lapply(seq_along(gaps$ends), FUN=readOneSection)
    
}


# Create URL with specified parameters for downloading data from Open Meteo
openMeteoURLCreate <- function(mainURL="https://archive-api.open-meteo.com/v1/archive", 
                               lat=45, 
                               lon=-90, 
                               startDate=paste(year(Sys.Date())-1, "01", "01", sep="-"), 
                               endDate=paste(year(Sys.Date())-1, "12", "31", sep="-"), 
                               hourlyMetrics=NULL, 
                               dailyMetrics=NULL,
                               tz="GMT", 
                               ...
                               ) {
    
    # FUNCTION ARGUMENTS:
    # mainURL: base URL of the Open Meteo archive API
    # lat, lon: latitude and longitude for the request
    # startDate, endDate: date range (defaults cover the full previous calendar year)
    # hourlyMetrics, dailyMetrics: comma-separated metric strings (NULL means omit parameter)
    # tz: timezone name; every "/" is percent-encoded for the URL
    # ...: additional strings appended verbatim to the end of the URL
    
    # Create formatted string
    fString <- paste0(mainURL, 
                      "?latitude=", 
                      lat, 
                      "&longitude=", 
                      lon, 
                      "&start_date=", 
                      startDate, 
                      "&end_date=", 
                      endDate
                      )
    if(!is.null(hourlyMetrics)) fString <- paste0(fString, "&hourly=", hourlyMetrics)
    if(!is.null(dailyMetrics)) fString <- paste0(fString, "&daily=", dailyMetrics)
    
    # Return the formatted string
    # gsub() replaces EVERY "/" in the timezone (str_replace only replaced the first,
    # which broke multi-slash zones such as "America/Indiana/Knox")
    paste0(fString, "&timezone=", gsub("/", "%2F", tz, fixed=TRUE), ...)
    
}


# Helper function to simplify entry of parameters for Open Meteo download requests
helperOpenMeteoURL <- function(cityName=NULL,
                               lat=NULL,
                               lon=NULL,
                               hourlyMetrics=NULL,
                               hourlyIndices=NULL,
                               hourlyDesc=tblMetricsHourly,
                               dailyMetrics=NULL,
                               dailyIndices=NULL,
                               dailyDesc=tblMetricsDaily,
                               startDate=NULL, 
                               endDate=NULL, 
                               tz=NULL,
                               ...
                               ) {
    
    # FUNCTION ARGUMENTS:
    # cityName: city name matched against maps::us.cities$name (used only when lat/lon are NULL)
    # lat, lon: coordinates for the request (NULL means look up by cityName)
    # hourlyMetrics, dailyMetrics: comma-separated metric strings (NULL means build from indices)
    # hourlyIndices, dailyIndices: row indices into hourlyDesc/dailyDesc for building metric strings
    # hourlyDesc, dailyDesc: metric description tables with a "metric" column
    # startDate, endDate, tz: passed through; NULL means use openMeteoURLCreate() defaults
    # ...: additional arguments passed to openMeteoURLCreate()
    
    # Convert city to lat/lon if lat/lon are NULL
    # (scalar conditions use || / && rather than the vectorized | / &)
    if(is.null(lat) || is.null(lon)) {
        if(is.null(cityName)) stop("\nMust provide lat/lon or city name available in maps::us.cities\n")
        cityData <- maps::us.cities %>% tibble::as_tibble() %>% filter(name==cityName)
        if(nrow(cityData)!=1) stop("\nMust provide city name that maps uniquely to maps::us.cities$name\n")
        lat <- cityData$lat[1]
        lon <- cityData$long[1]
    }
    
    # Get hourly metrics by index if relevant
    if(is.null(hourlyMetrics) && !is.null(hourlyIndices)) {
        hourlyMetrics <- hourlyDesc %>% slice(hourlyIndices) %>% pull(metric)
        hourlyMetrics <- paste0(hourlyMetrics, collapse=",")
        cat("\nHourly metrics created from indices:", hourlyMetrics, "\n\n")
    }
    
    # Get daily metrics by index if relevant
    if(is.null(dailyMetrics) && !is.null(dailyIndices)) {
        dailyMetrics <- dailyDesc %>% slice(dailyIndices) %>% pull(metric)
        dailyMetrics <- paste0(dailyMetrics, collapse=",")
        cat("\nDaily metrics created from indices:", dailyMetrics, "\n\n")
    }
    
    # Use default values from openMeteoURLCreate() for startDate, endDate, and tz if passed as NULL
    # (defaults are pulled from the formals so the two functions cannot drift apart)
    if(is.null(startDate)) startDate <- eval(formals(openMeteoURLCreate)$startDate)
    if(is.null(endDate)) endDate <- eval(formals(openMeteoURLCreate)$endDate)
    if(is.null(tz)) tz <- eval(formals(openMeteoURLCreate)$tz)
    
    # Create and return URL
    openMeteoURLCreate(lat=lat,
                       lon=lon, 
                       startDate=startDate, 
                       endDate=endDate, 
                       hourlyMetrics=hourlyMetrics, 
                       dailyMetrics=dailyMetrics, 
                       tz=tz,
                       ...
                       )
    
}


# Read JSON data returned from Open Meteo
readOpenMeteoJSON <- function(js, mapDaily=tblMetricsDaily, mapHourly=tblMetricsHourly) {
    
    # FUNCTION arguments: 
    # js: JSON list returned by download from Open-Meteo
    # mapDaily: mapping file for daily metrics
    # mapHourly: mapping file for hourly metrics
    #
    # RETURNS a list of four elements (any may be NULL if absent from the JSON):
    # tblDaily, tblHourly: processed observation tibbles (see omProcessDaily/omProcessHourly)
    # tblUnits: long tibble of metric units joined to the mapping tables
    # tblDescription: one-row tibble of all remaining (metadata) JSON elements
    
    # Get the object and names
    jsObj <- jsonlite::read_json(js, simplifyVector = TRUE)
    nms <- jsObj %>% names()
    cat("\nObjects in JSON include:", paste(nms, collapse=", "), "\n\n")
    
    # Set default objects as NULL
    tblDaily <- NULL
    tblHourly <- NULL
    tblUnitsDaily <- NULL
    tblUnitsHourly <- NULL
    
    # Get daily and hourly as tibble if relevant
    # (omProcessDaily/omProcessHourly add parsed date/time columns)
    if("daily" %in% nms) tblDaily <- jsObj$daily %>% tibble::as_tibble() %>% omProcessDaily()
    if("hourly" %in% nms) tblHourly <- jsObj$hourly %>% tibble::as_tibble() %>% omProcessHourly()
    
    # Helper function for unit conversions
    # When desc is NULL, the label is recovered from the caller's argument expression via
    # match.call()/deparse() - e.g. passing jsObj$daily_units yields the label "daily_units"
    # (everything up to and including the "$" is stripped)
    helperMetricUnit <- function(x, mapper, desc=NULL) {
        if(is.null(desc)) 
            desc <- as.list(match.call())$x %>% 
                deparse() %>% 
                stringr::str_replace_all(pattern=".*\\$", replacement="")
        x %>% 
            tibble::as_tibble() %>% 
            pivot_longer(cols=everything()) %>% 
            left_join(mapper, by=c("name"="metric")) %>% 
            mutate(value=stringr::str_replace(value, "\u00b0", "deg ")) %>%  # degree sign -> "deg "
            mutate(metricType=desc) %>% 
            select(metricType, everything())
    }
    
    # Get the unit descriptions
    # Note: this is one unbraced if / else if / else chain - valid inside a function
    # body, where the parser keeps reading past each closed if for a following else
    if("daily_units" %in% nms) tblUnitsDaily <- helperMetricUnit(jsObj$daily_units, mapDaily)
    if("hourly_units" %in% nms) tblUnitsHourly <- helperMetricUnit(jsObj$hourly_units, mapHourly)
    if(is.null(tblUnitsDaily) & !is.null(tblUnitsHourly)) tblUnits <- tblUnitsHourly
    else if(!is.null(tblUnitsDaily) & is.null(tblUnitsHourly)) tblUnits <- tblUnitsDaily
    else if(!is.null(tblUnitsDaily) & !is.null(tblUnitsHourly)) 
        tblUnits <- bind_rows(tblUnitsHourly, tblUnitsDaily)
    else tblUnits <- NULL
    
    # Put everything else together
    # (whatever JSON elements were not consumed above become one metadata row)
    tblDescription <- jsObj[setdiff(nms, c("hourly", "hourly_units", "daily", "daily_units"))] %>%
        tibble::as_tibble()
    
    # Return the list objects
    list(tblDaily=tblDaily, tblHourly=tblHourly, tblUnits=tblUnits, tblDescription=tblDescription)
    
}


# Return Open meteo metadata in prettified format
prettyOpenMeteoMeta <- function(df, extr="tblDescription") {
    
    # FUNCTION ARGUMENTS:
    # df: one-row metadata tibble, or a list (e.g., from readOpenMeteoJSON) containing one
    # extr: list element to extract when df is a list
    
    # inherits() is the idiomatic class test (equivalent to "list" %in% class(df))
    if(inherits(df, "list")) df <- df[[extr]]
    
    # Print each metadata element as "name: value" on its own line
    for(name in names(df)) {
        cat("\n", name, ": ", df %>% pull(name), sep="")
    }
    cat("\n\n")
}


# Process Open Meteo daily data
omProcessDaily <- function(tbl, extr="tblDaily") {
    
    # FUNCTION ARGUMENTS:
    # tbl: tibble of daily data, or a list (e.g., from readOpenMeteoJSON) containing one
    # extr: list element to extract when tbl is a list
    
    # inherits() is the idiomatic class test (equivalent to "list" %in% class(tbl))
    if(inherits(tbl, "list")) tbl <- tbl[[extr]]
    
    # Parse the character time field as a Date and move it to the front
    tbl %>% mutate(date=lubridate::ymd(time)) %>% select(date, everything())
}


# Process Open meteo hourly data
omProcessHourly <- function(tbl, extr="tblHourly") {
    
    # FUNCTION ARGUMENTS:
    # tbl: tibble of hourly data, or a list (e.g., from readOpenMeteoJSON) containing one
    # extr: list element to extract when tbl is a list
    
    # inherits() is the idiomatic class test (equivalent to "list" %in% class(tbl))
    if(inherits(tbl, "list")) tbl <- tbl[[extr]]
    
    # Keep the original string timestamp, parse it, and derive date and hour columns
    tbl %>% 
        mutate(origTime=time, 
               time=lubridate::ymd_hm(time), 
               date=lubridate::date(time), 
               hour=lubridate::hour(time)
               ) %>% 
        select(time, date, hour, everything())
}


# Simple predictive model for categorical variable
simpleOneVarPredict <- function(df, 
                                tgt, 
                                prd, 
                                dfTest=NULL,
                                nPrint=30, 
                                showPlot=TRUE, 
                                returnData=TRUE
                                ) {
    
    # FUNCTION ARGUMENTS:
    # df: data frame or tibble with key elements (training data set)
    # tgt: target variable
    # prd: predictor variable
    # dfTest: test dataset for applying predictions
    # nPrint: maximum number of lines of confusion matrix to print
    #         0 means do not print any summary statistics
    # showPlot: boolean, should overlap plot be created and shown?
    # returnData: boolean, should the prediction/confusion tibbles be returned as a list?
    
    # Counts of predictor to target variable; within each predictor bucket the
    # most frequent target (first row after sorting by desc(n)) becomes the prediction
    dfPred <- df %>%
        group_by(across(all_of(c(prd, tgt)))) %>%
        summarize(n=n(), .groups="drop") %>%
        arrange(across(all_of(prd)), desc(n)) %>%
        group_by(across(all_of(prd))) %>%
        mutate(correct=row_number()==1, predicted=first(get(tgt))) %>%
        ungroup()

    # Confusion matrix and accuracy (pctNaive assumes equally likely classes)
    dfConf <- dfPred %>%
        group_by(across(all_of(c(tgt, "correct")))) %>%
        summarize(n=sum(n), .groups="drop") %>%
        pivot_wider(id_cols=tgt, names_from=correct, values_from=n, values_fill=0) %>%
        mutate(n=`TRUE`+`FALSE`, 
               pctCorrect=`TRUE`/n, 
               pctNaive=1/(nrow(.)), 
               lift=pctCorrect/pctNaive-1
               )
    
    # Overall confusion matrix (pctNaive is the share of the modal class)
    dfConfAll <- dfConf %>%
        summarize(nMax=max(n), across(c(`FALSE`, `TRUE`, "n"), sum)) %>%
        mutate(pctCorrect=`TRUE`/n, 
               pctNaive=nMax/n, 
               lift=pctCorrect/pctNaive-1, 
               nBucket=length(unique(dfPred[[prd]]))
               )
    
    # Print confusion matrices
    if(nPrint > 0) {
        cat("\nAccuracy by target subgroup (training data):\n")
        dfConf %>% print(n=nPrint)
        cat("\nOverall Accuracy (training data):\n")
        dfConfAll %>% print(n=nPrint)
    }
    
    # Plot of overlaps
    if(isTRUE(showPlot)) {
        p1 <- dfPred %>%
            group_by(across(c(all_of(tgt), "predicted", "correct"))) %>%
            summarize(n=sum(n), .groups="drop") %>%
            ggplot(aes(x=get(tgt), y=predicted)) + 
            labs(x="Actual", 
                 y="Predicted", 
                 title=paste0("Training data - Actual vs. predicted ", tgt), 
                 subtitle=paste0("(using ", prd, ")")
                 ) + 
            geom_text(aes(label=n)) + 
            geom_tile(aes(fill=correct), alpha=0.25)
        print(p1)
    }
    
    # Create metrics for test dataset if requested
    if(!is.null(dfTest)) {
        # Get maximum category from training data
        # sort=TRUE ensures the MOST frequent prediction is taken
        # (without it, count() returns groups in value order and slice(1) took the first alphabetically)
        mostPredicted <- count(dfPred, predicted, wt=n, sort=TRUE) %>% slice(1) %>% pull(predicted)
        # Get mapping of metric to prediction
        dfPredict <- dfPred %>% 
            group_by(across(all_of(c(prd, "predicted")))) %>% 
            summarize(n=sum(n), .groups="drop")
        # Create predictions for test data; unseen predictor values fall back to mostPredicted
        # (explicit by= avoids the implicit-join message and accidental extra join keys)
        dfPredTest <- dfTest %>%
            select(all_of(c(prd, tgt))) %>%
            left_join(select(dfPredict, -n), by=prd) %>%
            replace_na(list(predicted=mostPredicted)) %>%
            group_by(across(all_of(c(prd, tgt, "predicted")))) %>%
            summarize(n=n(), .groups="drop") %>%
            mutate(correct=(get(tgt)==predicted))
        # Create confusion statistics for test data
        dfConfTest <- dfPredTest %>%
            group_by(across(all_of(c(tgt, "correct")))) %>%
            summarize(n=sum(n), .groups="drop") %>%
            pivot_wider(id_cols=tgt, names_from=correct, values_from=n, values_fill=0) %>%
            mutate(n=`TRUE`+`FALSE`, 
                   pctCorrect=`TRUE`/n, 
                   pctNaive=1/(nrow(.)), 
                   lift=pctCorrect/pctNaive-1
                   )
        # Overall confusion matrix for test data
        # nBucket must come from dfPredTest - dfConfTest has no predictor column,
        # so the previous dfConfTest[[prd]] was NULL and nBucket was always 0
        dfConfAllTest <- dfConfTest %>%
            summarize(nMax=max(n), across(c(`FALSE`, `TRUE`, "n"), sum)) %>%
            mutate(pctCorrect=`TRUE`/n, 
                   pctNaive=nMax/n, 
                   lift=pctCorrect/pctNaive-1, 
                   nBucket=length(unique(dfPredTest[[prd]]))
                   )
        # Print confusion matrices
        if(nPrint > 0) {
            cat("\nAccuracy by target subgroup (testing data):\n")
            dfConfTest %>% print(n=nPrint)
            cat("\nOverall Accuracy (testing data):\n")
            dfConfAllTest %>% print(n=nPrint)
            }
    } else {
        dfPredTest <- NULL
        dfConfTest <- NULL
        dfConfAllTest <- NULL
        
    }
    
    # Return data if requested
    if(isTRUE(returnData)) list(dfPred=dfPred, 
                                dfConf=dfConf, 
                                dfConfAll=dfConfAll, 
                                dfPredTest=dfPredTest, 
                                dfConfTest=dfConfTest, 
                                dfConfAllTest=dfConfAllTest
                                )
    
}


# Fit a single predictor to a single categorical variable
simpleOneVarFit <- function(df, 
                            tgt, 
                            prd, 
                            rankType="last", 
                            naMethod=TRUE
                            ) {
    
    # FUNCTION ARGUMENTS:
    # df: data frame or tibble with key elements (training data set)
    # tgt: target variable
    # prd: predictor variable
    # rankType: method for breaking ties of same n, passed to base::rank as ties.method=
    # naMethod: method for handling NA in ranks, passed to base::rank as na.last=
    
    # Count each predictor-target combination
    counted <- df %>%
        group_by(across(all_of(c(prd, tgt)))) %>%
        summarize(n=n(), .groups="drop")
    
    # Order within predictor by descending count, with the target as a stable tie-break
    ordered <- counted %>%
        arrange(across(all_of(prd)), desc(n), across(all_of(tgt)))
    
    # Within each predictor bucket, assign rankN so that rankN==1 is the most frequent target
    ordered %>%
        group_by(across(all_of(prd))) %>%
        mutate(rankN=n()+1-rank(n, ties.method=rankType, na.last=naMethod)) %>%
        arrange(across(all_of(prd)), rankN) %>%
        ungroup()

}


# Create categorical predictions mapper
simpleOneVarMapper <- function(df, tgt, prd) {
    
    # FUNCTION ARGUMENTS:
    # df: data frame or tibble from simpleOneVarFit()
    # tgt: target variable
    # prd: predictor variable
    
    # Overall target frequencies, most common first (used as the fallback prediction)
    dfCommon <- df %>% count(across(all_of(tgt)), wt=n, sort=TRUE)
    
    # For each predictor bucket, the top-ranked (first) row supplies the prediction
    dfPredictor <- df %>%
        group_by(across(all_of(prd))) %>%
        slice(1) %>%
        ungroup() %>%
        select(all_of(c(prd, tgt)))
    
    list(dfPredictor=dfPredictor, dfCommon=dfCommon)
    
}


# Map the categorical predictions to unseen data
simpleOneVarApplyMapper <- function(df, 
                                    tgt,
                                    prd, 
                                    mapper, 
                                    mapperDF="dfPredictor", 
                                    mapperDefault="dfCommon",
                                    prdName="predicted"
                                    ) {
    
    # FUNCTION ARGUMENTS:
    # df: data frame containing prd for predicting tgt
    # tgt: target variable in df
    # prd: predictor variable in df
    # mapper: mapping list from simpleOneVarMapper()
    # mapperDF: element that can be used to merge mappings
    # mapperDefault: element that can be used for NA resulting from merging mapperDF
    # prdName: name for the prediction variable
    
    # Extract the mapper and default value
    vecRename <- c(prdName) %>% purrr::set_names(tgt)
    dfMap <- mapper[[mapperDF]] %>% select(all_of(c(prd, tgt))) %>% colRenamer(vecRename=vecRename)
    chrDefault <- mapper[[mapperDefault]] %>% slice(1) %>% pull(tgt)
    
    # Merge mappings to df
    # The replace_na() list is keyed by prdName (previously hard-coded as "predicted"),
    # so the default fills the prediction column whatever name the caller chose
    df %>%
        left_join(dfMap, by=prd) %>%
        replace_na(purrr::set_names(list(chrDefault), prdName))
    
}


# Create confusion matrix data for categorical predictions
simpleOneVarConfusionData <- function(df, 
                                      tgtOrig,
                                      tgtPred, 
                                      otherVars=c(),
                                      weightBy="n"
                                      ) {
    
    # FUNCTION ARGUMENTS:
    # df: data frame from simpleOneVarApplyMapper()
    # tgtOrig: original target variable name in df
    # tgtPred: predicted target variable name in df
    # otherVars: other variables to be kept (will be grouping variables)
    # weightBy: weighting variable for counts in df (NULL means count each row of df as 1)
    
    # Group on actual, predicted, and any extra variables
    grouped <- df %>%
        group_by(across(all_of(c(tgtOrig, tgtPred, otherVars))))
    
    # Tally per group - weighted sum when weightBy is supplied, plain row count otherwise
    if(!is.null(weightBy)) {
        tallied <- grouped %>% summarize(n=sum(get(weightBy)), .groups="drop")
    } else {
        tallied <- grouped %>% summarize(n=n(), .groups="drop")
    }
    
    # Flag rows where the prediction matches the actual value
    tallied %>% mutate(correct=get(tgtOrig)==get(tgtPred))
    
}


# Print and plot confusion matrix for categorical predictions
simpleOneVarConfusionReport <- function(df, 
                                        tgtOrig,
                                        tgtPred, 
                                        otherVars=c(), 
                                        printConf=TRUE,
                                        printConfOrig=printConf, 
                                        printConfPred=printConf,
                                        printConfOverall=printConf, 
                                        plotConf=TRUE, 
                                        plotDesc="",
                                        nBucket=NA, 
                                        predictorVarName="", 
                                        returnData=FALSE
                                        ) {
    
    # FUNCTION ARGUMENTS:
    # df: data frame from simpleOneVarConfusionData()
    # tgtOrig: original target variable name in df
    # tgtPred: predicted target variable name in df
    # otherVars: other variables to be kept (will be grouping variables) - NOT IMPLEMENTED
    # printConf: boolean, should confusion matrix data be printed? Applies to all three
    # printConfOrig: boolean, should confusion data be printed based on original target variable?
    # printConfPred: boolean, should confusion data be printed based on predicted target variable?
    # printConfOverall: boolean, should overall confusion data be printed?
    # plotConf: boolean, should confusion overlap data be plotted?
    # plotDesc: descriptive label to be included in front of plot title
    # nBucket: number of buckets used for prediction (pass from previous data)
    # predictorVarName: variable name to be included in chart description
    # returnData: boolean, should the confusion matrices be returned?
    #
    # Each of the three summary tibbles is computed when it will be printed OR returned,
    # so returnData=TRUE forces computation even with all printing turned off.
    # Within summarize(), later expressions may use earlier results (wrong uses right).
    
    # Confusion data based on original target variable
    # (pctNaive is each actual class's share of all rows)
    if(isTRUE(printConfOrig) | isTRUE(returnData)) {
        dfConfOrig <- df %>%
            group_by(across(all_of(c(tgtOrig)))) %>%
            summarize(right=sum(n*correct), wrong=sum(n)-right, n=sum(n), .groups="drop") %>%
            mutate(pctRight=right/n, pctNaive=n/(sum(n)), lift=pctRight/pctNaive-1)
    }

    # Confusion data based on predicted target variable
    # (no naive baseline/lift here - only precision per predicted class)
    if(isTRUE(printConfPred) | isTRUE(returnData)) {
        dfConfPred <- df %>%
            group_by(across(all_of(c(tgtPred)))) %>%
            summarize(right=sum(n*correct), wrong=sum(n)-right, n=sum(n), .groups="drop") %>%
            mutate(pctRight=right/n)
    }

    # Overall confusion data
    # maxNaive is the count of the single most frequent actual class - the accuracy
    # a constant always-predict-the-mode model would achieve
    if(isTRUE(printConfOverall) | isTRUE(returnData)) {
        maxNaive <- df %>%
            group_by(across(all_of(tgtOrig))) %>%
            summarize(n=sum(n), .groups="drop") %>%
            arrange(desc(n)) %>%
            slice(1) %>%
            pull(n)
        dfConfOverall <- df %>%
            summarize(right=sum(n*correct), wrong=sum(n)-right, n=sum(n), .groups="drop") %>%
            mutate(maxN=maxNaive, pctRight=right/n, pctNaive=maxN/n, lift=pctRight/pctNaive-1, nBucket=nBucket)
    }
    
    # Confusion report based on original target variable
    if(isTRUE(printConfOrig)) {
        cat("\nConfusion data based on original target variable:", tgtOrig, "\n")
        dfConfOrig %>%
            print(n=50)
    }

    # Confusion report based on predicted target variable
    if(isTRUE(printConfPred)) {
        cat("\nConfusion data based on predicted target variable:", tgtPred, "\n")
        dfConfPred %>%
            print(n=50)
    }
    
    # Overall confusion matrix
    if(isTRUE(printConfOverall)) {
        cat("\nOverall confusion matrix\n")
        dfConfOverall %>%
            print(n=50)
    }
    
    # Plot of overlaps (actual on x, predicted on y; tiles shaded by correctness,
    # drawn after the text layer so counts remain readable through alpha=0.25)
    if(isTRUE(plotConf)) {
        p1 <- df %>%
            group_by(across(all_of(c(tgtOrig, tgtPred, "correct")))) %>%
            summarize(n=sum(n), .groups="drop") %>%
            ggplot(aes(x=get(tgtOrig), y=get(tgtPred))) + 
            labs(x="Actual", 
                 y="Predicted", 
                 title=paste0(plotDesc, "Actual vs. predicted ", tgtOrig), 
                 subtitle=paste0("(using ", predictorVarName, ")")
                 ) + 
            geom_text(aes(label=n)) + 
            geom_tile(aes(fill=correct), alpha=0.25)
        print(p1)
    }
    
    # Return data if requested
    if(isTRUE(returnData)) list(dfConfOrig=dfConfOrig, dfConfPred=dfConfPred, dfConfOverall=dfConfOverall)
    
}


# Process for chaining predictor, applier, and confusion matrix for categorical variables
simpleOneVarChain <- function(df,
                              tgt,
                              prd,
                              mapper=NULL, 
                              rankType="last", 
                              naMethod=TRUE, 
                              printReport=TRUE, 
                              plotDesc="",
                              returnData=TRUE, 
                              includeConfData=FALSE
                              ) {

    # FUNCTION ARGUMENTS:
    # df: data frame or tibble with key elements (training or testing data set)
    # tgt: target variable
    # prd: predictor variable
    # mapper: mapping list to be applied for predictions (NULL means create from simpleOneVarMapper())
    # rankType: method for breaking ties of same n, passed to base::rank as ties.method=
    # naMethod: method for handling NA in ranks, passed to base::rank as na.last=
    # printReport: boolean, should the confusion report data and plot be printed?
    # plotDesc: descriptive label to be included in front of plot title
    # returnData: boolean, should data elements be returned?
    # includeConfData: boolean, should confusion data be returned?
    
    # Step 1: summarize predictor-target counts and ranks
    fitSummary <- simpleOneVarFit(df, tgt=tgt, prd=prd, rankType=rankType, naMethod=naMethod)

    # Step 2: build the prediction mapper unless one was supplied (e.g., built on training data)
    if(is.null(mapper)) mapper <- simpleOneVarMapper(fitSummary, tgt=tgt, prd=prd)
    
    # Step 3: attach predictions to the fitted summary
    appliedData <- simpleOneVarApplyMapper(fitSummary, tgt=tgt, prd=prd, mapper=mapper)

    # Step 4: build the confusion matrix data
    confData <- simpleOneVarConfusionData(appliedData, tgtOrig=tgt, tgtPred="predicted")
    
    # Step 5: print and/or capture the confusion report when either output is wanted
    if(isTRUE(printReport) | isTRUE(includeConfData)) {
        confReport <- simpleOneVarConfusionReport(df=confData, 
                                                  tgtOrig=tgt, 
                                                  tgtPred="predicted", 
                                                  nBucket=length(unique(appliedData[[prd]])), 
                                                  predictorVarName=prd, 
                                                  printConf=printReport, 
                                                  plotConf=printReport,
                                                  plotDesc=plotDesc,
                                                  returnData=includeConfData
                                                  )
    }
    
    # Assemble and return the data elements if requested
    if(isTRUE(returnData)) {
        out <- list(dfFit=fitSummary, mapper=mapper, dfApplied=appliedData, dfConfusion=confData)
        if(isTRUE(includeConfData)) out <- c(out, list(dfConfData=confReport))
        out
    }
    
}


# Adds a train-test component for single variable predictions
simpleOneVarTrainTest <- function(dfTrain,
                                  dfTest,
                                  tgt,
                                  prd,
                                  rankType="last", 
                                  naMethod=TRUE, 
                                  printReport=FALSE, 
                                  includeConfData=TRUE, 
                                  returnData=TRUE
                              ) {

    # FUNCTION ARGUMENTS:
    # dfTrain: data frame or tibble with key elements (training data set)
    # dfTest: data frame or tibble with key elements (testing data set)
    # tgt: target variable
    # prd: predictor variable
    # rankType: method for breaking ties of same n, passed to base::rank as ties.method=
    # naMethod: method for handling NA in ranks, passed to base::rank as na.last=
    # printReport: boolean, should the confusion report data and plot be printed?
    # includeConfData: boolean, should confusion data be returned?
    # returnData: boolean, should data elements be returned?
    
    # Fit the training data (returnData is forced TRUE so the mapper can be reused below)
    trainResults <- simpleOneVarChain(df=dfTrain, 
                                      tgt=tgt, 
                                      prd=prd,
                                      rankType=rankType,
                                      naMethod=naMethod,
                                      printReport=printReport,
                                      plotDesc="Training data: ",
                                      returnData=TRUE,
                                      includeConfData=includeConfData
                                      )
    
    # Score the testing data with the mapper learned on the training data
    testResults <- simpleOneVarChain(df=dfTest, 
                                     tgt=tgt, 
                                     prd=prd,
                                     mapper=trainResults$mapper,
                                     rankType=rankType,
                                     naMethod=naMethod,
                                     printReport=printReport,
                                     plotDesc="Testing data: ",
                                     returnData=TRUE,
                                     includeConfData=includeConfData
                                     )
    
    # Return both result sets if requested (list names kept for downstream callers)
    if(isTRUE(returnData)) list(tmpTrain=trainResults, tmpTest=testResults)
    
}


# Plot the means by cluster and variable for a k-means object
plotClusterMeans <- function(km, nrow=NULL, ncol=NULL, scales="fixed") {

    # FUNCTION ARGUMENTS
    # km: object returned by stats::kmeans(...)
    # nrow: number of rows for faceting (NULL means default)
    # ncol: number of columns for faceting (NULL means default)
    # scales: passed to facet_wrap as scales=scales
    #
    # NOTE(review): the parameters nrow/ncol shadow base::nrow/base::ncol as data objects,
    # but the calls nrow(km$centers) below still resolve to base::nrow because R skips
    # non-function bindings when looking up a name used as a function. Works, but fragile.
    
    # Assess clustering by dimension: one point per (metric, cluster) center value,
    # faceted by cluster, metrics ordered by the spread of their cluster means
    p1 <- km$centers %>%
        tibble::as_tibble() %>%
        mutate(cluster=row_number()) %>%                # row i of centers = cluster i
        pivot_longer(cols=-c(cluster)) %>%              # long form: name=metric, value=mean
        ggplot(aes(x=fct_reorder(name, 
                                 value, 
                                 # With exactly 2 clusters use the SIGNED difference a[2]-a[1]
                                 # (can be negative); otherwise the width of the range
                                 .fun=function(a) ifelse(length(a)==2, a[2]-a[1], diff(range(a)))
                                 ), 
                   y=value
                   )
               ) + 
        geom_point(aes(color=factor(cluster))) + 
        scale_color_discrete("Cluster") + 
        facet_wrap(~factor(cluster), nrow=nrow, ncol=ncol, scales=scales) +
        labs(title=paste0("Cluster means (kmeans, centers=", nrow(km$centers), ")"), 
             x="Metric", 
             y="Cluster mean"
             ) + 
        # Dashed reference line at the median over ALL entries of the centers matrix
        geom_hline(yintercept=median(km$centers), lty=2) +
        coord_flip()
    print(p1)
    
}


# Plot percentage by cluster
plotClusterPct <- function(df, km, keyVars, nRowFacet=1, printPlot=TRUE) {
    
    # FUNCTION ARGUMENTS:
    # df: data frame initially passed to stats::kmeans(...)
    # km: object returned by stats::kmeans(...)
    # keyVars: character vector of length 1 (y-only, x will be cl) or length 2 (x, y, cl will facet)
    # nRowFacet: number of rows for facetting (only relevant if length(keyVars) is 2)
    # printPlot: boolean, should plot be printed? (if not true, plot will be returned)
    
    # Check length of keyVars
    if(!(length(keyVars) %in% c(1, 2))) stop("\nArgument keyVars must be length-1 or length-2\n")
    
    # Axis labels depend on how many key variables were supplied
    # (plain if/else rather than ifelse(), which is meant for vectorized conditions)
    xLab <- if(length(keyVars)==1) "Cluster" else keyVars[1]
    yLab <- if(length(keyVars)==1) keyVars[1] else keyVars[2]
    
    # Percentage of observations falling in each cluster within each keyVars combination
    p1 <- df %>%
        mutate(cl=factor(km$cluster)) %>%
        group_by(across(c(all_of(keyVars), "cl"))) %>%
        summarize(n=n(), .groups="drop") %>%
        group_by(across(all_of(keyVars))) %>%
        mutate(pct=n/sum(n)) %>%
        ungroup() %>%
        ggplot() + 
        scale_fill_continuous(low="white", high="green") + 
        labs(title=paste0("Percentage by cluster (kmeans with ", nrow(km$centers), " centers)"), 
             x=xLab, 
             y=yLab
             )
    
    # One key variable: clusters on the x axis; two: clusters become facets
    if(length(keyVars)==1) {
        p1 <- p1 + geom_tile(aes(fill=pct, x=cl, y=get(keyVars[1])))
    } else {
        p1 <- p1 + 
            geom_tile(aes(fill=pct, x=get(keyVars[1]), y=get(keyVars[2]))) + 
            facet_wrap(~cl, nrow=nRowFacet)
    }
    
    if(isTRUE(printPlot)) print(p1)
    else return(p1)
    
}


# Run k-means (or use passed k-means object) and plot centers and percentages of observations
# Run k-means (or use passed k-means object) and plot centers and percentages of observations
runKMeans <- function(df, 
                      km=NULL,
                      vars=NULL, 
                      centers=2, 
                      nStart=1L, 
                      iter.max=10L, 
                      seed=NULL, 
                      plotMeans=FALSE,
                      nrowMeans=NULL,
                      plotPct=NULL, 
                      nrowPct=1, 
                      returnKM=is.null(km)
                      ) {
    
    # FUNCTION ARGUMENTS:
    # df: data frame for clustering
    # km: k-means object (will shut off k-means processing and run as plot-only)
    # vars: variables to be used for clustering (NULL means everything in df)
    # centers: number of centers
    # nStart: passed to kmeans
    # iter.max: passed to kmeans
    # seed: seed to be set (if NULL, no seed is set)
    # plotMeans: boolean, plot variable means by cluster?
    # nrowMeans: argument passed as nrow for faceting rows in plotClusterMeans() - NULL is default ggplot2
    # plotPct: list of character vectors to be passed sequentially as keyVars to plotClusterPct()
    #          NULL means do not run
    #          pctByCluster=list(c("var1"), c("var2", "var3")) will run plotting twice
    # nrowPct: argument for faceting number of rows in plotClusterPct()
    # returnKM: boolean, should the k-means object be returned?
    #           (default returns the object only when it was fit here rather than passed in)
    
    # Set seed if requested
    if(!is.null(seed)) set.seed(seed)
    
    # Get the variable names if passed as NULL
    if(is.null(vars)) vars <- names(df)
    
    # Run the k-means process if the object has not been passed
    if(is.null(km)) {
        km <- df %>%
            select(all_of(vars)) %>% 
            kmeans(centers=centers, iter.max=iter.max, nstart=nStart)
    }

    # Assess clustering by dimension if requested
    if(isTRUE(plotMeans)) plotClusterMeans(km, nrow=nrowMeans)
    # seq_along() is safe for an empty list (1:length(x) would yield c(1, 0))
    if(!is.null(plotPct)) 
        for(ctr in seq_along(plotPct)) 
            plotClusterPct(df=df, km=km, keyVars=plotPct[[ctr]], nRowFacet=nrowPct)
    
    # Return the k-means object
    if(isTRUE(returnKM)) return(km)
    
}


# Assign points to closest center of a passed k-means object
# Assign points to closest center of a passed k-means object
assignKMeans <- function(km, df, returnAllDistanceData=FALSE) {
    
    # FUNCTION ARGUMENTS:
    # km: a k-means object
    # df: data frame or tibble
    # returnAllDistanceData: boolean, should the distance data and clusters be returned?
    #                        TRUE returns a data frame with distances as V1, V2, ..., and cluster as cl
    #                        FALSE returns a vector of cluster assignments as integers
    
    # Select columns from df to match km
    # all.equal() returns TRUE or a character vector describing differences, so
    # it must be wrapped in isTRUE() here: negating a character vector in if()
    # would itself error instead of triggering the intended stop()
    df <- df %>% select(all_of(colnames(km$centers)))
    if(!isTRUE(all.equal(names(df), colnames(km$centers)))) stop("\nName mismatch in clustering and frame\n")
    
    # Euclidean distance from every observation to each center (one column per
    # center), then assign each observation to the nearest center
    distClust <- sapply(seq_len(nrow(km$centers)), 
                        FUN=function(x) sqrt(rowSums(sweep(as.matrix(df), 
                                                           2, 
                                                           t(as.matrix(km$centers[x,,drop=FALSE]))
                                                           )**2
                                                     )
                                             )
                        ) %>% 
        as.data.frame() %>% 
        tibble::as_tibble() %>% 
        mutate(cl=apply(., 1, which.min))
    
    # Return the proper file
    if(isTRUE(returnAllDistanceData)) return(distClust)
    else return(distClust$cl)
    
}

Specific functions written in _v002 are copied as well:

# Fit a simple ranger random forest predicting yVar from xVars
runSimpleRF <- function(df, yVar, xVars=NULL, ...) {

    # FUNCTION ARGUMENTS:
    # df: data frame containing observations
    # yVar: variable to be predicted (numeric for regression, categorical for classification)
    # xVars: predictor variables (NULL means everything in df except for yVar)
    # ...: other arguments passed to ranger::ranger
    
    # Create xVars if passed as NULL
    if(is.null(xVars)) xVars <- setdiff(names(df), yVar)
    
    # Simple random forest model
    # reformulate() builds yVar ~ x1 + x2 + ... directly, avoiding manual
    # string pasting and as.formula()
    ranger::ranger(stats::reformulate(termlabels=xVars, response=yVar), 
                   data=df[, c(yVar, xVars)], 
                   ...
                   )
    
}

# Plot (and/or return) variable importance extracted from a random forest fit
plotRFImportance <- function(rf, 
                             impName="variable.importance", 
                             divBy=1000, 
                             plotTitle=NULL, 
                             plotData=TRUE, 
                             returnData=!isTRUE(plotData)
                             ) {
    
    # FUNCTION ARGUMENTS:
    # rf: output list from random forest with an element for importance
    # impName: name of the element to extract from rf
    # divBy: divisor for the importance variable
    # plotTitle: title for plot (NULL means use default)
    # plotData: boolean, should the importance plot be created and printed?
    # returnData: boolean, should the processed data be returned?
    
    # Create title if not provided
    if(is.null(plotTitle)) plotTitle <- "Importance for simple random forest"

    # Create y-axis label (note divisor unless dividing by 1)
    # Use <- for assignment; = is reserved for function arguments
    yAxisLabel <- "Variable Importance"
    if(!isTRUE(all.equal(divBy, 1))) yAxisLabel <- paste0(yAxisLabel, " (", divBy, "s)")
    
    # Convert the named importance vector into a two-column tibble (metric, imp)
    df <- rf[[impName]] %>% 
        as.data.frame() %>% 
        purrr::set_names("imp") %>% 
        rownames_to_column("metric") %>% 
        tibble::as_tibble() 
    
    # Create and print plot if requested
    if(isTRUE(plotData)) {
        p1 <- df %>%
            ggplot(aes(x=fct_reorder(metric, imp), y=imp/divBy)) + 
            geom_col(fill="lightblue") + 
            labs(x=NULL, y=yAxisLabel, title=plotTitle) +
            coord_flip()
        print(p1)
    }
    
    # Return data if requested
    if(isTRUE(returnData)) return(df)
    
}

# Apply a trained random forest to new data, returning predictions
predictRF <- function(rf, df, newCol="pred", predsOnly=FALSE) {
    
    # FUNCTION ARGUMENTS:
    # rf: a trained random forest model
    # df: data frame for adding predictions
    # newCol: name for new column to be added to df
    # predsOnly: boolean, should only the vector of predictions be returned?
    #            if FALSE, a column named newCol is added to df, with df returned

    # Score the supplied data with the fitted model
    preds <- predict(rf, data=df)$predictions
    
    # Early exit with the bare prediction vector when requested
    if(isTRUE(predsOnly)) return(preds)
    
    # Otherwise attach the predictions as a new column and return the frame
    df[newCol] <- preds
    return(df)
    
}

# Update for continuous variables
# Report/return accuracy: R-squared for continuous targets, exact-match
# proportion for categorical targets
reportAccuracy <- function(df, 
                           trueCol, 
                           predCol="pred", 
                           reportAcc=TRUE, 
                           rndReport=2, 
                           useLabel="requested data",
                           returnAcc=!isTRUE(reportAcc), 
                           reportR2=FALSE
                           ) {
    
    # FUNCTION ARGUMENTS:
    # df: data frame containing actual and predictions
    # trueCol: column containing true value
    # predCol: column containing predicted value
    # reportAcc: boolean, should accuracy be reported (printed to output)?
    # rndReport: number of significant digits for reporting (will be converted to percentage first)
    # useLabel: label for data to be used in reporting
    # returnAcc: boolean, should the accuracy be returned?
    #            return value is not converted to percentage, not rounded
    # reportR2: boolean, should accuracy be calculated as R-squared?
    #           (default FALSE measures as categorical)
    
    # Continuous or categorical reporting
    if(isTRUE(reportR2)) {
        # [[ ]] is the robust way to extract a column by string name;
        # pull(get(col)) relies on environment lookup and can mis-resolve
        tc <- df[[trueCol]]
        pc <- df[[predCol]]
        # R-squared as 1 - MSE(model)/MSE(intercept-only null model)
        mseNull <- mean((tc-mean(tc))**2)
        msePred <- mean((tc-pc)**2)
        r2 <- 1 - msePred/mseNull
        if(isTRUE(reportAcc)) 
            cat("\nR-squared of ", 
                useLabel, 
                " is: ", 
                round(100*r2, rndReport), 
                "% (RMSE ",
                round(sqrt(msePred), 2), 
                " vs. ", 
                round(sqrt(mseNull), 2),
                " null)\n", 
                sep=""
                )
        acc <- c("mseNull"=mseNull, "msePred"=msePred, "r2"=r2)
    } else {
        # Compare vectors, not one-column data frames, for a plain proportion
        acc <- mean(df[[trueCol]]==df[[predCol]])
        if(isTRUE(reportAcc)) 
            cat("\nAccuracy of ", useLabel, " is: ", round(100*acc, rndReport), "%\n", sep="")    
    }
    
    # Return accuracy statistic if requested
    if(isTRUE(returnAcc)) return(acc)
    
}

# Update for automated rounding
# Plot predicted vs. actual: confusion-style tile chart for categorical
# targets, weighted scatter with linear fit for continuous targets
plotConfusion <- function(df, 
                          trueCol, 
                          predCol="pred", 
                          useTitle=NULL,
                          useSub=NULL, 
                          plotCont=FALSE, 
                          rndTo=NULL,
                          rndBucketsAuto=100,
                          nSig=NULL,
                          refXY=FALSE
                          ) {
    
    # FUNCTION ARGUMENTS:
    # df: data frame containing actual and predictions
    # trueCol: column containing true value
    # predCol: column containing predicted value
    # useTitle: title to be used for chart (NULL means create from trueCol)
    # useSub: subtitle to be used for chart (NULL means none)
    # plotCont: boolean, should plotting assume continuous variables?
    #           (default FALSE assumes confusion plot for categorical variables)
    # rndTo: every number in x should be rounded to the nearest rndTo
    #        NULL means no rounding (default)
    #        -1L means make an estimate based on data
    # rndBucketsAuto: integer, if rndTo is -1L, about how many buckets are desired for predictions?
    # nSig: number of significant digits for automatically calculated rounding parameter
    #       (NULL means calculate exactly)
    # refXY: boolean, should a reference line for y=x be included? (relevant only for continuous)
    
    # Create title if not supplied
    if(is.null(useTitle)) useTitle <- paste0("Predicting ", trueCol)

    # Function auto-round returns vector as-is when rndTo is NULL and auto-rounds when rndTo is -1L
    df <- df %>%
        mutate(across(all_of(c(trueCol, predCol)), 
                      .fns=function(x) autoRound(x, rndTo=rndTo, rndBucketsAuto=rndBucketsAuto, nSig=nSig)
                      )
               )
    
    # Create base plot (applicable to categorical or continuous variables)
    # Use x as true and y as predicted, for more meaningful geom_smooth() if continuous
    # Flip coordinates if categorical
    # .data[[...]] is the documented tidy-eval pronoun for string column names
    # (safer than get() inside aes(), which uses environment lookup)
    p1 <- df %>%
        group_by(across(all_of(c(trueCol, predCol)))) %>%
        summarize(n=n(), .groups="drop") %>%
        ggplot(aes(y=.data[[predCol]], x=.data[[trueCol]])) + 
        labs(y="Predicted", x="Actual", title=useTitle, subtitle=useSub)
        
    # Update plot as appropriate
    if(isTRUE(plotCont)) {
        p1 <- p1 +
            geom_point(aes(size=n), alpha=0.5) + 
            scale_size_continuous("# Obs") +
            geom_smooth(aes(weight=n), method="lm")
        if(isTRUE(refXY)) p1 <- p1 + geom_abline(slope=1, intercept=0, lty=2, color="red")
    } else {
        p1 <- p1 + 
            geom_tile(aes(fill=n)) + 
            geom_text(aes(label=n), size=2.5) +
            coord_flip() +
            scale_fill_continuous("", low="white", high="green")
    }
    
    # Output plot
    print(p1)
    
}

# End-to-end random forest pipeline: fit, importance, predict, accuracy, plot
runFullRF <- function(dfTrain, 
                      yVar, 
                      xVars, 
                      dfTest=dfTrain,
                      useLabel="test data",
                      useSub=NULL, 
                      isContVar=FALSE,
                      rndTo=NULL,
                      rndBucketsAuto=100,
                      nSig=NULL,
                      refXY=FALSE,
                      makePlots=TRUE,
                      plotImp=makePlots,
                      plotConf=makePlots,
                      returnData=FALSE, 
                      ...
                      ) {
    
    # FUNCTION ARGUMENTS:
    # dfTrain: training data
    # yVar: dependent variable
    # xVars: column(s) containing independent variables
    # dfTest: test dataset for applying predictions
    # useLabel: label to be used for reporting accuracy
    # useSub: subtitle to be used for confusion chart (NULL means none)
    # isContVar: boolean, is the variable continuous? (default FALSE means categorical)
    # rndTo: every number in x should be rounded to the nearest rndTo
    #        NULL means no rounding (default)
    #        -1L means make an estimate based on data
    # rndBucketsAuto: integer, if rndTo is -1L, about how many buckets are desired for predictions?
    # nSig: number of significant digits for automatically calculated rounding parameter
    #       (NULL means calculate exactly)    
    # refXY: boolean, should a reference line for y=x be included? (relevant only for continuous)
    # makePlots: boolean, should plots be created for variable importance and confusion matrix?
    # plotImp: boolean, should variable importance be plotted? (default is makePlots)
    # plotConf: boolean, should confusion matrix be plotted? (default is makePlots)
    # returnData: boolean, should data be returned?
    # ...: additional parameters to pass to runSimpleRF(), which are then passed to ranger::ranger()

    # Step 1: fit the forest, using impurity as the importance measure
    rfFit <- runSimpleRF(df=dfTrain, yVar=yVar, xVars=xVars, importance="impurity", ...)

    # Step 2: extract variable importance (plot only when plotImp is TRUE)
    impData <- plotRFImportance(rfFit, plotData=plotImp, returnData=TRUE)

    # Step 3: score the test dataset (adds a "pred" column)
    dfPred <- predictRF(rf=rfFit, df=dfTest)

    # Step 4: accuracy reporting (R-squared when continuous, else categorical)
    accStat <- reportAccuracy(dfPred, 
                              trueCol=yVar, 
                              rndReport=3, 
                              useLabel=useLabel, 
                              reportR2=isTRUE(isContVar),
                              returnAcc=TRUE
                              )

    # Step 5: confusion/scatter plot if requested
    if(isTRUE(plotConf)) {
        plotConfusion(dfPred, 
                      trueCol=yVar, 
                      useSub=useSub, 
                      plotCont=isTRUE(isContVar), 
                      rndTo=rndTo, 
                      rndBucketsAuto=rndBucketsAuto,
                      nSig=nSig,
                      refXY=refXY
                      )
    }
    
    # Step 6: return all artifacts if requested (list names are part of the
    # interface; runPartialImportanceRF() indexes into them)
    if(isTRUE(returnData)) return(list(rf=rfFit, rfImp=impData, tstPred=dfPred, rfAcc=accStat))
    
}

# Rerun the full random forest pipeline restricted to the top-importance
# predictors for yVar, returning only the requested result elements
runPartialImportanceRF <- function(dfTrain, 
                                   yVar, 
                                   dfTest,
                                   impDB=dfImp,
                                   nImp=+Inf,
                                   otherX=c(),
                                   isContVar=TRUE, 
                                   useLabel=keyLabel, 
                                   useSub=stringr::str_to_sentence(keyLabel), 
                                   rndTo=NULL,
                                   rndBucketsAuto=50,
                                   nSig=NULL,
                                   refXY=FALSE,
                                   makePlots=FALSE, 
                                   returnElem=c("rfImp", "rfAcc")
                                   ) {
    
    # FUNCTION ARGUMENTS
    # dfTrain: training data
    # yVar: y variable in dfTrain
    # dfTest: test data
    # impDB: tibble containing variable importance by dependent variable
    # nImp: use the top nImp variables by variable importance
    # otherX: include these additional x variables
    # isContVar: boolean, is this a continuous variable (regression)? FALSE means classification
    # useLabel: label for description
    # useSub: label for plot
    # rndTo: controls the rounding parameter for plots, passed to runFullRF 
    #        (NULL means no rounding)
    #        -1L means make an estimate based on underlying data
    # rndBucketsAuto: integer, if rndTo is -1L, about how many buckets are desired for predictions?
    # nSig: number of significant digits for automatically calculated rounding parameter
    #       (NULL means calculate exactly)    
    # refXY: controls the reference line parameter for plots, passed to runFullRF
    # makePlots: boolean, should plots be created?
    # returnElem: character vector of list elements to be returned
    
    # NOTE(review): the defaults impDB=dfImp and useLabel=keyLabel resolve
    # lazily against objects expected to exist outside this function (e.g.
    # globals created earlier in the analysis); calls relying on these
    # defaults will error if dfImp/keyLabel are not defined — confirm they
    # exist before use, or pass the arguments explicitly.

    # Predictor set: metrics ranked within the top nImp for this yVar in
    # impDB (assumes impDB has columns n, src, metric — per the filter/pull
    # below), combined with any explicitly requested otherX variables.
    # runFullRF returns a named list; only the returnElem elements survive.
    runFullRF(dfTrain=dfTrain, 
              yVar=yVar, 
              xVars=unique(c(impDB %>% filter(n<=nImp, src==yVar) %>% pull(metric), otherX)), 
              dfTest=dfTest, 
              isContVar = isContVar, 
              useLabel=useLabel, 
              useSub=useSub, 
              rndTo=rndTo,
              rndBucketsAuto=rndBucketsAuto,
              nSig=nSig,
              refXY=refXY,
              makePlots=makePlots,
              returnData=TRUE
              )[returnElem]
    
}

# Round a numeric vector to the nearest multiple of a (possibly automatic) increment
autoRound <- function(x, rndTo=-1L, rndBucketsAuto=100, nSig=NULL) {

    # FUNCTION ARGUMENTS
    # x: vector to be rounded
    # rndTo: every number in x should be rounded to the nearest rndTo
    #        NULL means no rounding
    #        -1L means make an estimate based on data (default)
    # rndBucketsAuto: integer, if rndTo is -1L, about how many buckets are desired for predictions?
    # nSig: number of significant digits for automatically calculated rounding parameter
    #       (NULL means calculate exactly)
    
    # NULL disables rounding entirely: hand back the input untouched
    if(is.null(rndTo)) return(x)
    
    # The sentinel -1L requests an automatically derived rounding increment
    if(isTRUE(all.equal(-1L, rndTo))) {
        # Already coarse enough? (within 150% of the target bucket count)
        nUnique <- length(unique(x))
        if(nUnique <= 1.5*rndBucketsAuto) return(x)
        # Spread the observed range evenly across the requested bucket count
        rndTo <- diff(range(x)) / rndBucketsAuto
        # Optionally trim the increment to nSig significant digits
        if(!is.null(nSig)) rndTo <- signif(rndTo, digits=nSig)
    }
    
    # Snap every value to the nearest multiple of the increment
    round(x/rndTo)*rndTo

}

Key mapping tables for available metrics are also copied:

# Comma-delimited lists of the hourly and daily metric names offered by the
# Open-Meteo historical API (split on "," downstream)
hourlyMetrics <- "temperature_2m,relativehumidity_2m,dewpoint_2m,apparent_temperature,pressure_msl,surface_pressure,precipitation,rain,snowfall,cloudcover,cloudcover_low,cloudcover_mid,cloudcover_high,shortwave_radiation,direct_radiation,direct_normal_irradiance,diffuse_radiation,windspeed_10m,windspeed_100m,winddirection_10m,winddirection_100m,windgusts_10m,et0_fao_evapotranspiration,weathercode,vapor_pressure_deficit,soil_temperature_0_to_7cm,soil_temperature_7_to_28cm,soil_temperature_28_to_100cm,soil_temperature_100_to_255cm,soil_moisture_0_to_7cm,soil_moisture_7_to_28cm,soil_moisture_28_to_100cm,soil_moisture_100_to_255cm"
dailyMetrics <- "weathercode,temperature_2m_max,temperature_2m_min,apparent_temperature_max,apparent_temperature_min,precipitation_sum,rain_sum,snowfall_sum,precipitation_hours,sunrise,sunset,windspeed_10m_max,windgusts_10m_max,winddirection_10m_dominant,shortwave_radiation_sum,et0_fao_evapotranspiration"

# Newline-delimited description strings, one "\n"-separated entry per metric
# above (split on "\n" downstream); entry counts must match the metric counts.
# NOTE: hourlyDescription must be a single source line — a literal line break
# inside the string would inject an extra newline, yielding 34 descriptions
# for 33 metrics and breaking the tibble construction below.
hourlyDescription <- "Air temperature at 2 meters above ground\nRelative humidity at 2 meters above ground\nDew point temperature at 2 meters above ground\nApparent temperature is the perceived feels-like temperature combining wind chill factor, relative humidity and solar radiation\nAtmospheric air pressure reduced to mean sea level (msl) or pressure at surface. Typically pressure on mean sea level is used in meteorology. Surface pressure gets lower with increasing elevation.\nAtmospheric air pressure reduced to mean sea level (msl) or pressure at surface. Typically pressure on mean sea level is used in meteorology. Surface pressure gets lower with increasing elevation.\nTotal precipitation (rain, showers, snow) sum of the preceding hour. Data is stored with a 0.1 mm precision. If precipitation data is summed up to monthly sums, there might be small inconsistencies with the total precipitation amount.\nOnly liquid precipitation of the preceding hour including local showers and rain from large scale systems.\nSnowfall amount of the preceding hour in centimeters. For the water equivalent in millimeter, divide by 7. E.g. 7 cm snow = 10 mm precipitation water equivalent\nTotal cloud cover as an area fraction\nLow level clouds and fog up to 2 km altitude\nMid level clouds from 2 to 6 km altitude\nHigh level clouds from 6 km altitude\nShortwave solar radiation as average of the preceding hour. This is equal to the total global horizontal irradiation\nDirect solar radiation as average of the preceding hour on the horizontal plane and the normal plane (perpendicular to the sun)\nDirect solar radiation as average of the preceding hour on the horizontal plane and the normal plane (perpendicular to the sun)\nDiffuse solar radiation as average of the preceding hour\nWind speed at 10 or 100 meters above ground. Wind speed on 10 meters is the standard level.\nWind speed at 10 or 100 meters above ground. Wind speed on 10 meters is the standard level.\nWind direction at 10 or 100 meters above ground\nWind direction at 10 or 100 meters above ground\nGusts at 10 meters above ground of the indicated hour. Wind gusts in CERRA are defined as the maximum wind gusts of the preceding hour. Please consult the ECMWF IFS documentation for more information on how wind gusts are parameterized in weather models.\nET0 Reference Evapotranspiration of a well watered grass field. Based on FAO-56 Penman-Monteith equations ET0 is calculated from temperature, wind speed, humidity and solar radiation. Unlimited soil water is assumed. ET0 is commonly used to estimate the required irrigation for plants.\nWeather condition as a numeric code. Follow WMO weather interpretation codes. See table below for details. Weather code is calculated from cloud cover analysis, precipitation and snowfall. As barely no information about atmospheric stability is available, estimation about thunderstorms is not possible.\nVapor Pressure Deificit (VPD) in kilopascal (kPa). For high VPD (>1.6), water transpiration of plants increases. For low VPD (<0.4), transpiration decreases\nAverage temperature of different soil levels below ground.\nAverage temperature of different soil levels below ground.\nAverage temperature of different soil levels below ground.\nAverage temperature of different soil levels below ground.\nAverage soil water content as volumetric mixing ratio at 0-7, 7-28, 28-100 and 100-255 cm depths.\nAverage soil water content as volumetric mixing ratio at 0-7, 7-28, 28-100 and 100-255 cm depths.\nAverage soil water content as volumetric mixing ratio at 0-7, 7-28, 28-100 and 100-255 cm depths.\nAverage soil water content as volumetric mixing ratio at 0-7, 7-28, 28-100 and 100-255 cm depths."
dailyDescription <- "The most severe weather condition on a given day\nMaximum and minimum daily air temperature at 2 meters above ground\nMaximum and minimum daily air temperature at 2 meters above ground\nMaximum and minimum daily apparent temperature\nMaximum and minimum daily apparent temperature\nSum of daily precipitation (including rain, showers and snowfall)\nSum of daily rain\nSum of daily snowfall\nThe number of hours with rain\nSun rise and set times\nSun rise and set times\nMaximum wind speed and gusts on a day\nMaximum wind speed and gusts on a day\nDominant wind direction\nThe sum of solar radiaion on a given day in Megajoules\nDaily sum of ET0 Reference Evapotranspiration of a well watered grass field"

# Pair each hourly metric name with its description in a lookup tibble
tblMetricsHourly <- tibble::tibble(
    metric=str_split_1(hourlyMetrics, ","), 
    description=str_split_1(hourlyDescription, "\n")
    )
# Show all rows (default tibble printing truncates at 10)
tblMetricsHourly %>% 
    print(n=50)
## # A tibble: 33 × 2
##    metric                        description                                    
##    <chr>                         <chr>                                          
##  1 temperature_2m                Air temperature at 2 meters above ground       
##  2 relativehumidity_2m           Relative humidity at 2 meters above ground     
##  3 dewpoint_2m                   Dew point temperature at 2 meters above ground 
##  4 apparent_temperature          Apparent temperature is the perceived feels-li…
##  5 pressure_msl                  Atmospheric air pressure reduced to mean sea l…
##  6 surface_pressure              Atmospheric air pressure reduced to mean sea l…
##  7 precipitation                 Total precipitation (rain, showers, snow) sum …
##  8 rain                          Only liquid precipitation of the preceding hou…
##  9 snowfall                      Snowfall amount of the preceding hour in centi…
## 10 cloudcover                    Total cloud cover as an area fraction          
## 11 cloudcover_low                Low level clouds and fog up to 2 km altitude   
## 12 cloudcover_mid                Mid level clouds from 2 to 6 km altitude       
## 13 cloudcover_high               High level clouds from 6 km altitude           
## 14 shortwave_radiation           Shortwave solar radiation as average of the pr…
## 15 direct_radiation              Direct solar radiation as average of the prece…
## 16 direct_normal_irradiance      Direct solar radiation as average of the prece…
## 17 diffuse_radiation             Diffuse solar radiation as average of the prec…
## 18 windspeed_10m                 Wind speed at 10 or 100 meters above ground. W…
## 19 windspeed_100m                Wind speed at 10 or 100 meters above ground. W…
## 20 winddirection_10m             Wind direction at 10 or 100 meters above ground
## 21 winddirection_100m            Wind direction at 10 or 100 meters above ground
## 22 windgusts_10m                 Gusts at 10 meters above ground of the indicat…
## 23 et0_fao_evapotranspiration    ET0 Reference Evapotranspiration of a well wat…
## 24 weathercode                   Weather condition as a numeric code. Follow WM…
## 25 vapor_pressure_deficit        Vapor Pressure Deificit (VPD) in kilopascal (k…
## 26 soil_temperature_0_to_7cm     Average temperature of different soil levels b…
## 27 soil_temperature_7_to_28cm    Average temperature of different soil levels b…
## 28 soil_temperature_28_to_100cm  Average temperature of different soil levels b…
## 29 soil_temperature_100_to_255cm Average temperature of different soil levels b…
## 30 soil_moisture_0_to_7cm        Average soil water content as volumetric mixin…
## 31 soil_moisture_7_to_28cm       Average soil water content as volumetric mixin…
## 32 soil_moisture_28_to_100cm     Average soil water content as volumetric mixin…
## 33 soil_moisture_100_to_255cm    Average soil water content as volumetric mixin…
# Pair each daily metric name with its description in a lookup tibble
tblMetricsDaily <- tibble::tibble(
    metric=str_split_1(dailyMetrics, ","), 
    description=str_split_1(dailyDescription, "\n")
    )
# All 16 rows fit within default tibble printing
tblMetricsDaily
## # A tibble: 16 × 2
##    metric                     description                                       
##    <chr>                      <chr>                                             
##  1 weathercode                The most severe weather condition on a given day  
##  2 temperature_2m_max         Maximum and minimum daily air temperature at 2 me…
##  3 temperature_2m_min         Maximum and minimum daily air temperature at 2 me…
##  4 apparent_temperature_max   Maximum and minimum daily apparent temperature    
##  5 apparent_temperature_min   Maximum and minimum daily apparent temperature    
##  6 precipitation_sum          Sum of daily precipitation (including rain, showe…
##  7 rain_sum                   Sum of daily rain                                 
##  8 snowfall_sum               Sum of daily snowfall                             
##  9 precipitation_hours        The number of hours with rain                     
## 10 sunrise                    Sun rise and set times                            
## 11 sunset                     Sun rise and set times                            
## 12 windspeed_10m_max          Maximum wind speed and gusts on a day             
## 13 windgusts_10m_max          Maximum wind speed and gusts on a day             
## 14 winddirection_10m_dominant Dominant wind direction                           
## 15 shortwave_radiation_sum    The sum of solar radiaion on a given day in Megaj…
## 16 et0_fao_evapotranspiration Daily sum of ET0 Reference Evapotranspiration of …

Core datasets (previously downloaded) are loaded, with explanatory variables added for future processing:

# Read daily JSON file
# readOpenMeteoJSON() (defined in _v001) parses an Open-Meteo response into a
# list with elements tblDaily, tblHourly, tblUnits, and tblDescription
nycOMDaily <- readOpenMeteoJSON("testOM_daily_nyc.json")
## 
## Objects in JSON include: latitude, longitude, generationtime_ms, utc_offset_seconds, timezone, timezone_abbreviation, elevation, daily_units, daily
nycOMDaily
## $tblDaily
## # A tibble: 4,914 × 18
##    date       time       weathercode temperature_2m_max temperature_2m_min
##    <date>     <chr>            <int>              <dbl>              <dbl>
##  1 2010-01-01 2010-01-01          73                5                 -1.4
##  2 2010-01-02 2010-01-02          71               -0.6               -9.2
##  3 2010-01-03 2010-01-03          71               -4.8              -10  
##  4 2010-01-04 2010-01-04           1               -0.8               -7.3
##  5 2010-01-05 2010-01-05           1               -0.2               -7.3
##  6 2010-01-06 2010-01-06           2                1.1               -5.3
##  7 2010-01-07 2010-01-07           2                3.6               -3.7
##  8 2010-01-08 2010-01-08          71                1.9               -5.7
##  9 2010-01-09 2010-01-09           0               -1.4               -7.7
## 10 2010-01-10 2010-01-10           0               -1.7              -10.3
## # ℹ 4,904 more rows
## # ℹ 13 more variables: apparent_temperature_max <dbl>,
## #   apparent_temperature_min <dbl>, precipitation_sum <dbl>, rain_sum <dbl>,
## #   snowfall_sum <dbl>, precipitation_hours <dbl>, sunrise <chr>, sunset <chr>,
## #   windspeed_10m_max <dbl>, windgusts_10m_max <dbl>,
## #   winddirection_10m_dominant <int>, shortwave_radiation_sum <dbl>,
## #   et0_fao_evapotranspiration <dbl>
## 
## $tblHourly
## NULL
## 
## $tblUnits
## # A tibble: 17 × 4
##    metricType  name                       value      description                
##    <chr>       <chr>                      <chr>      <chr>                      
##  1 daily_units time                       "iso8601"  <NA>                       
##  2 daily_units weathercode                "wmo code" The most severe weather co…
##  3 daily_units temperature_2m_max         "deg C"    Maximum and minimum daily …
##  4 daily_units temperature_2m_min         "deg C"    Maximum and minimum daily …
##  5 daily_units apparent_temperature_max   "deg C"    Maximum and minimum daily …
##  6 daily_units apparent_temperature_min   "deg C"    Maximum and minimum daily …
##  7 daily_units precipitation_sum          "mm"       Sum of daily precipitation…
##  8 daily_units rain_sum                   "mm"       Sum of daily rain          
##  9 daily_units snowfall_sum               "cm"       Sum of daily snowfall      
## 10 daily_units precipitation_hours        "h"        The number of hours with r…
## 11 daily_units sunrise                    "iso8601"  Sun rise and set times     
## 12 daily_units sunset                     "iso8601"  Sun rise and set times     
## 13 daily_units windspeed_10m_max          "km/h"     Maximum wind speed and gus…
## 14 daily_units windgusts_10m_max          "km/h"     Maximum wind speed and gus…
## 15 daily_units winddirection_10m_dominant "deg "     Dominant wind direction    
## 16 daily_units shortwave_radiation_sum    "MJ/m²"    The sum of solar radiaion …
## 17 daily_units et0_fao_evapotranspiration "mm"       Daily sum of ET0 Reference…
## 
## $tblDescription
## # A tibble: 1 × 7
##   latitude longitude generationtime_ms utc_offset_seconds timezone        
##      <dbl>     <dbl>             <dbl>              <int> <chr>           
## 1     40.7     -73.9              101.             -14400 America/New_York
## # ℹ 2 more variables: timezone_abbreviation <chr>, elevation <dbl>
# Pretty-print the scalar metadata (lat/lon, timezone, elevation, etc.) for the daily pull
prettyOpenMeteoMeta(nycOMDaily)
## 
## latitude: 40.7
## longitude: -73.9
## generationtime_ms: 100.914
## utc_offset_seconds: -14400
## timezone: America/New_York
## timezone_abbreviation: EDT
## elevation: 36
# Read hourly JSON file; for an hourly pull the parser leaves tblDaily NULL
nycOMHourly <- readOpenMeteoJSON("testOM_hourly_nyc.json")
## 
## Objects in JSON include: latitude, longitude, generationtime_ms, utc_offset_seconds, timezone, timezone_abbreviation, elevation, hourly_units, hourly
# Inspect the parsed list: tblHourly data plus tblUnits and tblDescription metadata
nycOMHourly
## $tblDaily
## NULL
## 
## $tblHourly
## # A tibble: 117,936 × 37
##    time                date        hour temperature_2m relativehumidity_2m
##    <dttm>              <date>     <int>          <dbl>               <int>
##  1 2010-01-01 00:00:00 2010-01-01     0           -1.1                  95
##  2 2010-01-01 01:00:00 2010-01-01     1           -1                    96
##  3 2010-01-01 02:00:00 2010-01-01     2           -1                    96
##  4 2010-01-01 03:00:00 2010-01-01     3           -0.8                  97
##  5 2010-01-01 04:00:00 2010-01-01     4           -0.9                  97
##  6 2010-01-01 05:00:00 2010-01-01     5           -0.8                  97
##  7 2010-01-01 06:00:00 2010-01-01     6           -0.7                  97
##  8 2010-01-01 07:00:00 2010-01-01     7           -0.5                  97
##  9 2010-01-01 08:00:00 2010-01-01     8           -0.6                  97
## 10 2010-01-01 09:00:00 2010-01-01     9           -0.6                  97
## # ℹ 117,926 more rows
## # ℹ 32 more variables: dewpoint_2m <dbl>, apparent_temperature <dbl>,
## #   pressure_msl <dbl>, surface_pressure <dbl>, precipitation <dbl>,
## #   rain <dbl>, snowfall <dbl>, cloudcover <int>, cloudcover_low <int>,
## #   cloudcover_mid <int>, cloudcover_high <int>, shortwave_radiation <dbl>,
## #   direct_radiation <dbl>, direct_normal_irradiance <dbl>,
## #   diffuse_radiation <dbl>, windspeed_10m <dbl>, windspeed_100m <dbl>, …
## 
## $tblUnits
## # A tibble: 34 × 4
##    metricType   name                 value   description                        
##    <chr>        <chr>                <chr>   <chr>                              
##  1 hourly_units time                 iso8601 <NA>                               
##  2 hourly_units temperature_2m       deg C   Air temperature at 2 meters above …
##  3 hourly_units relativehumidity_2m  %       Relative humidity at 2 meters abov…
##  4 hourly_units dewpoint_2m          deg C   Dew point temperature at 2 meters …
##  5 hourly_units apparent_temperature deg C   Apparent temperature is the percei…
##  6 hourly_units pressure_msl         hPa     Atmospheric air pressure reduced t…
##  7 hourly_units surface_pressure     hPa     Atmospheric air pressure reduced t…
##  8 hourly_units precipitation        mm      Total precipitation (rain, showers…
##  9 hourly_units rain                 mm      Only liquid precipitation of the p…
## 10 hourly_units snowfall             cm      Snowfall amount of the preceding h…
## # ℹ 24 more rows
## 
## $tblDescription
## # A tibble: 1 × 7
##   latitude longitude generationtime_ms utc_offset_seconds timezone        
##      <dbl>     <dbl>             <dbl>              <int> <chr>           
## 1     40.7     -73.9              118.             -14400 America/New_York
## # ℹ 2 more variables: timezone_abbreviation <chr>, elevation <dbl>
# Pretty-print the scalar metadata for the hourly pull
prettyOpenMeteoMeta(nycOMHourly)
## 
## latitude: 40.7
## longitude: -73.9
## generationtime_ms: 118.0021
## utc_offset_seconds: -14400
## timezone: America/New_York
## timezone_abbreviation: EDT
## elevation: 36
# Create calendar/grouping features plus within-dataset percentiles for every
# numeric variable. NOTE: expression order inside mutate() is load-bearing —
# todSeason is pasted together while season and tod are still character, and
# both are converted to factors only afterward
nycTemp <- nycOMHourly$tblHourly %>%
    mutate(year=year(date), 
           month=factor(month.abb[lubridate::month(date)], levels=month.abb), 
           hour=lubridate::hour(time), 
           fct_hour=factor(hour), 
           # "Day" is hours 07-18 inclusive; all other hours are "Night"
           tod=ifelse(hour>=7 & hour<=18, "Day", "Night"), 
           season=case_when(month %in% c("Mar", "Apr", "May") ~ "Spring", 
                            month %in% c("Jun", "Jul", "Aug") ~ "Summer", 
                            month %in% c("Sep", "Oct", "Nov") ~ "Fall", 
                            month %in% c("Dec", "Jan", "Feb") ~ "Winter", 
                            TRUE~"typo" # sentinel: flags any month not matched above
                            ), 
           todSeason=paste0(season, "-", tod), 
           tod=factor(tod, levels=c("Day", "Night")), 
           season=factor(season, levels=c("Spring", "Summer", "Fall", "Winter")), 
           todSeason=factor(todSeason, 
                            levels=paste0(rep(c("Spring", "Summer", "Fall", "Winter"), each=2), 
                                          "-", 
                                          c("Day", "Night")
                                          )
                            ),
           # Percentile-rank (0-100) every numeric column — including the
           # just-created year and hour — into new pct_* columns
           across(where(is.numeric), .fns=function(x) round(100*percent_rank(x)), .names="pct_{.col}")
           )

glimpse(nycTemp)
## Rows: 117,936
## Columns: 78
## $ time                              <dttm> 2010-01-01 00:00:00, 2010-01-01 01:…
## $ date                              <date> 2010-01-01, 2010-01-01, 2010-01-01,…
## $ hour                              <int> 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11…
## $ temperature_2m                    <dbl> -1.1, -1.0, -1.0, -0.8, -0.9, -0.8, …
## $ relativehumidity_2m               <int> 95, 96, 96, 97, 97, 97, 97, 97, 97, …
## $ dewpoint_2m                       <dbl> -1.7, -1.6, -1.6, -1.2, -1.3, -1.2, …
## $ apparent_temperature              <dbl> -3.9, -3.9, -3.9, -3.7, -3.7, -3.6, …
## $ pressure_msl                      <dbl> 1017.2, 1016.5, 1015.9, 1015.6, 1015…
## $ surface_pressure                  <dbl> 1012.6, 1011.9, 1011.3, 1011.0, 1011…
## $ precipitation                     <dbl> 0.5, 0.5, 0.4, 0.3, 0.1, 0.0, 0.0, 0…
## $ rain                              <dbl> 0.0, 0.1, 0.1, 0.1, 0.0, 0.0, 0.0, 0…
## $ snowfall                          <dbl> 0.35, 0.28, 0.21, 0.14, 0.07, 0.00, …
## $ cloudcover                        <int> 90, 93, 80, 68, 71, 100, 100, 100, 1…
## $ cloudcover_low                    <int> 2, 8, 3, 6, 15, 51, 99, 99, 96, 77, …
## $ cloudcover_mid                    <int> 98, 96, 99, 98, 95, 97, 98, 99, 94, …
## $ cloudcover_high                   <int> 97, 93, 59, 13, 0, 0, 0, 0, 0, 0, 0,…
## $ shortwave_radiation               <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 53, 11…
## $ direct_radiation                  <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 20…
## $ direct_normal_irradiance          <dbl> 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0…
## $ diffuse_radiation                 <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 41, 93…
## $ windspeed_10m                     <dbl> 3.1, 3.5, 3.3, 3.9, 3.5, 3.4, 0.0, 1…
## $ windspeed_100m                    <dbl> 3.8, 3.1, 3.8, 4.7, 6.4, 5.7, 1.4, 1…
## $ winddirection_10m                 <int> 339, 336, 347, 338, 336, 342, 180, 2…
## $ winddirection_100m                <int> 41, 21, 17, 356, 344, 342, 360, 217,…
## $ windgusts_10m                     <dbl> 9.0, 9.7, 10.1, 7.6, 7.6, 6.8, 5.4, …
## $ et0_fao_evapotranspiration        <dbl> 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, …
## $ weathercode                       <int> 73, 73, 73, 71, 71, 3, 3, 3, 3, 3, 3…
## $ vapor_pressure_deficit            <dbl> 0.03, 0.02, 0.02, 0.02, 0.02, 0.02, …
## $ soil_temperature_0_to_7cm         <dbl> -0.7, -0.7, -0.7, -0.6, -0.6, -0.6, …
## $ soil_temperature_7_to_28cm        <dbl> 0.1, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0…
## $ soil_temperature_28_to_100cm      <dbl> 4.2, 4.2, 4.1, 4.1, 4.1, 4.1, 4.1, 4…
## $ soil_temperature_100_to_255cm     <dbl> 10.6, 10.6, 10.6, 10.6, 10.6, 10.6, …
## $ soil_moisture_0_to_7cm            <dbl> 0.373, 0.374, 0.376, 0.377, 0.377, 0…
## $ soil_moisture_7_to_28cm           <dbl> 0.377, 0.377, 0.377, 0.377, 0.377, 0…
## $ soil_moisture_28_to_100cm         <dbl> 0.413, 0.413, 0.413, 0.413, 0.413, 0…
## $ soil_moisture_100_to_255cm        <dbl> 0.412, 0.412, 0.412, 0.412, 0.412, 0…
## $ origTime                          <chr> "2010-01-01T00:00", "2010-01-01T01:0…
## $ year                              <dbl> 2010, 2010, 2010, 2010, 2010, 2010, …
## $ month                             <fct> Jan, Jan, Jan, Jan, Jan, Jan, Jan, J…
## $ fct_hour                          <fct> 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11…
## $ tod                               <fct> Night, Night, Night, Night, Night, N…
## $ season                            <fct> Winter, Winter, Winter, Winter, Wint…
## $ todSeason                         <fct> Winter-Night, Winter-Night, Winter-N…
## $ pct_hour                          <dbl> 0, 4, 8, 13, 17, 21, 25, 29, 33, 38,…
## $ pct_temperature_2m                <dbl> 10, 10, 10, 11, 11, 11, 11, 12, 11, …
## $ pct_relativehumidity_2m           <dbl> 92, 94, 94, 96, 96, 96, 96, 96, 96, …
## $ pct_dewpoint_2m                   <dbl> 23, 24, 24, 25, 25, 25, 25, 25, 25, …
## $ pct_apparent_temperature          <dbl> 15, 15, 15, 15, 15, 15, 17, 17, 16, …
## $ pct_pressure_msl                  <dbl> 53, 49, 46, 44, 44, 41, 38, 36, 37, …
## $ pct_surface_pressure              <dbl> 51, 47, 44, 42, 42, 39, 36, 35, 36, …
## $ pct_precipitation                 <dbl> 93, 93, 92, 90, 86, 0, 0, 0, 0, 0, 0…
## $ pct_rain                          <dbl> 0, 87, 87, 87, 0, 0, 0, 0, 0, 0, 0, …
## $ pct_snowfall                      <dbl> 99, 99, 99, 99, 98, 0, 0, 0, 0, 0, 0…
## $ pct_cloudcover                    <dbl> 77, 79, 72, 67, 68, 81, 81, 81, 81, …
## $ pct_cloudcover_low                <dbl> 51, 60, 53, 58, 65, 77, 90, 90, 88, …
## $ pct_cloudcover_mid                <dbl> 90, 89, 92, 90, 88, 89, 90, 92, 87, …
## $ pct_cloudcover_high               <dbl> 81, 76, 63, 49, 0, 0, 0, 0, 0, 0, 0,…
## $ pct_shortwave_radiation           <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 49, 57, 6…
## $ pct_direct_radiation              <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 60, 62…
## $ pct_direct_normal_irradiance      <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 61, 61…
## $ pct_diffuse_radiation             <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 50, 58, 7…
## $ pct_windspeed_10m                 <dbl> 3, 4, 3, 5, 4, 4, 0, 1, 2, 5, 8, 8, …
## $ pct_windspeed_100m                <dbl> 2, 1, 2, 3, 6, 5, 0, 0, 4, 9, 9, 8, …
## $ pct_winddirection_10m             <dbl> 94, 93, 96, 94, 93, 95, 35, 43, 53, …
## $ pct_winddirection_100m            <dbl> 8, 4, 3, 99, 96, 95, 100, 46, 51, 61…
## $ pct_windgusts_10m                 <dbl> 3, 4, 5, 1, 1, 1, 0, 0, 0, 1, 2, 4, …
## $ pct_et0_fao_evapotranspiration    <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 32, 4…
## $ pct_weathercode                   <dbl> 99, 99, 99, 98, 98, 69, 69, 69, 69, …
## $ pct_vapor_pressure_deficit        <dbl> 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 8, …
## $ pct_soil_temperature_0_to_7cm     <dbl> 6, 6, 6, 7, 7, 7, 7, 7, 7, 8, 9, 10,…
## $ pct_soil_temperature_7_to_28cm    <dbl> 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, …
## $ pct_soil_temperature_28_to_100cm  <dbl> 16, 16, 15, 15, 15, 15, 15, 15, 15, …
## $ pct_soil_temperature_100_to_255cm <dbl> 42, 42, 42, 42, 42, 42, 42, 42, 42, …
## $ pct_soil_moisture_0_to_7cm        <dbl> 70, 71, 73, 74, 74, 74, 74, 74, 73, …
## $ pct_soil_moisture_7_to_28cm       <dbl> 69, 69, 69, 69, 69, 68, 68, 68, 68, …
## $ pct_soil_moisture_28_to_100cm     <dbl> 96, 96, 96, 96, 96, 96, 96, 96, 96, …
## $ pct_soil_moisture_100_to_255cm    <dbl> 96, 96, 96, 96, 96, 96, 96, 96, 96, …
## $ pct_year                          <dbl> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, …
# Sanity-check coverage: records per year/month cell, drawn as a heatmap with
# the counts overlaid as text
nycTemp %>%
    count(year, month) %>%
    ggplot(aes(x=month, y=factor(year))) +
    geom_tile(aes(fill=n)) +
    geom_text(aes(label=n), size=3) +
    scale_fill_continuous(name="# Records", low="white", high="green") +
    labs(x=NULL, y=NULL, title="Records by year and month")

# Sanity check: 8 season-by-tod combinations, with Day/Night counts equal within each season
nycTemp %>% count(todSeason, season, tod)
## # A tibble: 8 × 4
##   todSeason    season tod       n
##   <fct>        <fct>  <fct> <int>
## 1 Spring-Day   Spring Day   15456
## 2 Spring-Night Spring Night 15456
## 3 Summer-Day   Summer Day   14532
## 4 Summer-Night Summer Night 14532
## 5 Fall-Day     Fall   Day   14196
## 6 Fall-Night   Fall   Night 14196
## 7 Winter-Day   Winter Day   14784
## 8 Winter-Night Winter Night 14784
# Sanity check: hours 7-18 map to Day and the rest to Night, with equal record counts per hour
nycTemp %>% count(hour, fct_hour, tod) %>% print(n=30)
## # A tibble: 24 × 4
##     hour fct_hour tod       n
##    <int> <fct>    <fct> <int>
##  1     0 0        Night  4914
##  2     1 1        Night  4914
##  3     2 2        Night  4914
##  4     3 3        Night  4914
##  5     4 4        Night  4914
##  6     5 5        Night  4914
##  7     6 6        Night  4914
##  8     7 7        Day    4914
##  9     8 8        Day    4914
## 10     9 9        Day    4914
## 11    10 10       Day    4914
## 12    11 11       Day    4914
## 13    12 12       Day    4914
## 14    13 13       Day    4914
## 15    14 14       Day    4914
## 16    15 15       Day    4914
## 17    16 16       Day    4914
## 18    17 17       Day    4914
## 19    18 18       Day    4914
## 20    19 19       Night  4914
## 21    20 20       Night  4914
## 22    21 21       Night  4914
## 23    22 22       Night  4914
## 24    23 23       Night  4914
# Sanity check: each month maps to exactly one season
nycTemp %>% count(month, season)
## # A tibble: 12 × 3
##    month season     n
##    <fct> <fct>  <int>
##  1 Jan   Winter 10416
##  2 Feb   Winter  9480
##  3 Mar   Spring 10416
##  4 Apr   Spring 10080
##  5 May   Spring 10416
##  6 Jun   Summer  9720
##  7 Jul   Summer  9672
##  8 Aug   Summer  9672
##  9 Sep   Fall    9360
## 10 Oct   Fall    9672
## 11 Nov   Fall    9360
## 12 Dec   Winter  9672
# Add uniformly-distributed random noise variables (pct_0005 ... pct_5000) to
# the dataset; the fixed seed makes both the draws and the split below
# reproducible
set.seed(24020416)
nycTempRand <- nycTemp %>%
    mutate(pct_0005=sample(0:5, size=nrow(.), replace=TRUE),
           pct_0025=sample(0:25, size=nrow(.), replace=TRUE), 
           pct_0100=sample(0:100, size=nrow(.), replace=TRUE), 
           pct_0250=sample(0:250, size=nrow(.), replace=TRUE),
           pct_0500=sample(0:500, size=nrow(.), replace=TRUE), 
           pct_1000=sample(0:1000, size=nrow(.), replace=TRUE), 
           pct_2500=sample(0:2500, size=nrow(.), replace=TRUE), 
           pct_5000=sample(0:5000, size=nrow(.), replace=TRUE)
           )

# Split into train and test data (3:1 split in favor of TRAIN: 75% of row
# indices are drawn without replacement for training, the remainder held out)
idxTrain <- sort(sample(seq_len(nrow(nycTempRand)), size=round(0.75*nrow(nycTempRand)), replace=FALSE))
nycTempTrain <- nycTempRand[idxTrain, ]
nycTempTest <- nycTempRand[-idxTrain, ]

Holdout data are created from a subsequent year (2022, excluded from training), and the function is tested on the categorical variable month:

# Build a temporal holdout: train on pre-2022 rows only, test on all 2022 rows
# (test and train partitions recombined so the 2022 year is fully held out);
# doy (day of year) is added as a calendar predictor
dfTrain <- nycTempTrain %>%
    filter(lubridate::year(date) < 2022) %>%
    mutate(doy=yday(date))
dfTest <- bind_rows(nycTempTest, nycTempTrain) %>%
    filter(lubridate::year(date) == 2022) %>%
    mutate(doy=yday(date))
keyLabel <- "predictions based on pre-2022 training data applied to 2022 holdout dataset"

# Candidate predictors: the percentile columns, minus bookkeeping percentiles
# (hour, weathercode, year) and the random noise columns (names ending 0 or 5)
varsTrain <- nycTempTrain %>%
    select(starts_with("pct")) %>%
    select(-pct_hour, -pct_weathercode, -pct_year, -ends_with("0"), -ends_with("5")) %>%
    names()
varsTrain
##  [1] "pct_temperature_2m"                "pct_relativehumidity_2m"          
##  [3] "pct_dewpoint_2m"                   "pct_apparent_temperature"         
##  [5] "pct_pressure_msl"                  "pct_surface_pressure"             
##  [7] "pct_precipitation"                 "pct_rain"                         
##  [9] "pct_snowfall"                      "pct_cloudcover"                   
## [11] "pct_cloudcover_low"                "pct_cloudcover_mid"               
## [13] "pct_cloudcover_high"               "pct_shortwave_radiation"          
## [15] "pct_direct_radiation"              "pct_direct_normal_irradiance"     
## [17] "pct_diffuse_radiation"             "pct_windspeed_10m"                
## [19] "pct_windspeed_100m"                "pct_winddirection_10m"            
## [21] "pct_winddirection_100m"            "pct_windgusts_10m"                
## [23] "pct_et0_fao_evapotranspiration"    "pct_vapor_pressure_deficit"       
## [25] "pct_soil_temperature_0_to_7cm"     "pct_soil_temperature_7_to_28cm"   
## [27] "pct_soil_temperature_28_to_100cm"  "pct_soil_temperature_100_to_255cm"
## [29] "pct_soil_moisture_0_to_7cm"        "pct_soil_moisture_7_to_28cm"      
## [31] "pct_soil_moisture_28_to_100cm"     "pct_soil_moisture_100_to_255cm"
# Full random forest classifying month from the percentile predictors,
# evaluated on the 2022 holdout set
rfMonth <- runFullRF(dfTrain=dfTrain, dfTest=dfTest, 
                     yVar="month", xVars=varsTrain, 
                     useLabel=keyLabel, 
                     useSub=stringr::str_to_sentence(keyLabel), 
                     returnData=TRUE)

## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 82.774%

The function is tested on continuous variable temperature:

# Full random forest regressing raw temperature_2m; temperature- and
# apparent-temperature-derived percentiles are excluded from the predictors
# (near-duplicates of the target) and calendar terms month/tod/doy are added
rfTemp2m <- runFullRF(dfTrain=dfTrain, dfTest=dfTest, 
                      yVar="temperature_2m", 
                      xVars=c(varsTrain[!str_detect(varsTrain, "pct_temp|apparent")], "month", "tod", "doy"), 
                      useLabel=keyLabel, 
                      useSub=stringr::str_to_sentence(keyLabel), 
                      isContVar=TRUE, rndTo=-1L, refXY=TRUE, returnData=TRUE)

## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.239% (RMSE 0.9 vs. 10.3 null)
## `geom_smooth()` using formula = 'y ~ x'

The function is tested on continuous variable soil temperature:

# Full random forest regressing deep soil temperature; the soil-temperature
# percentiles are excluded from the predictors (near-duplicates of the target)
# and calendar terms month/tod/doy are added
rfSoil255 <- runFullRF(dfTrain=dfTrain, dfTest=dfTest, 
                       yVar="soil_temperature_100_to_255cm", 
                       xVars=c(varsTrain[!str_detect(varsTrain, "pct_soil_temp")], "month", "tod", "doy"), 
                       useLabel=keyLabel, 
                       useSub=stringr::str_to_sentence(keyLabel), 
                       isContVar=TRUE, rndTo=-1L, refXY=TRUE, returnData=TRUE)

## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 97.55% (RMSE 0.91 vs. 5.84 null)
## `geom_smooth()` using formula = 'y ~ x'

Variable importance is extracted, and cumulative variance explained is explored:

# Stack the per-model importance tables and compute, within each model, the
# share of total importance (pct), its cumulative sum (cspct), and the
# importance rank (n). map_dfr() is superseded in purrr 1.0 (loaded above);
# map() + list_rbind(names_to=) is the current equivalent idiom
dfImp <- list("month"=rfMonth, 
              "temperature_2m"=rfTemp2m, 
              "soil_temperature_100_to_255cm"=rfSoil255
              ) %>%
    map(.f=function(x) x$rfImp) %>%
    list_rbind(names_to="src") %>%
    arrange(src, desc(imp)) %>%
    group_by(src) %>%
    mutate(pct=imp/sum(imp), cspct=cumsum(pct), n=row_number()) %>%
    ungroup()
dfImp
## # A tibble: 96 × 6
##    src   metric                               imp    pct cspct     n
##    <chr> <chr>                              <dbl>  <dbl> <dbl> <int>
##  1 month pct_soil_temperature_100_to_255cm 19868. 0.275  0.275     1
##  2 month pct_soil_temperature_28_to_100cm  12497. 0.173  0.448     2
##  3 month pct_soil_moisture_100_to_255cm     7197. 0.0996 0.547     3
##  4 month pct_soil_temperature_7_to_28cm     6355. 0.0879 0.635     4
##  5 month pct_soil_moisture_28_to_100cm      4874. 0.0674 0.703     5
##  6 month pct_soil_temperature_0_to_7cm      2780. 0.0385 0.741     6
##  7 month pct_soil_moisture_7_to_28cm        2655. 0.0367 0.778     7
##  8 month pct_soil_moisture_0_to_7cm         2013. 0.0279 0.806     8
##  9 month pct_apparent_temperature           1967. 0.0272 0.833     9
## 10 month pct_temperature_2m                 1652. 0.0229 0.856    10
## # ℹ 86 more rows
# Cumulative importance curve per model. NOTE the magrittr dot rule in play:
# because "." appears only NESTED inside group_by(), the piped data is ALSO
# inserted as the first argument of bind_rows(), so the zero-anchor rows
# (n=0, cspct=0, one per src) are appended to the full data, not substituted
dfImp %>%
    select(src, n, cspct) %>%
    bind_rows(group_by(., src) %>% filter(n==1) %>% mutate(n=0, cspct=0) %>% ungroup) %>%
    ggplot(aes(x=n, y=cspct)) +
    geom_line(aes(group=src, color=src)) + 
    labs(x="# Variables", y="Cumulative %", title="Cumulative Variance Explained vs. # Variables") + 
    scale_color_discrete("Dependent\nVariable")

Month is predicted using only the four top importance variables:

# Refit the month classifier using only the top-4 predictors by importance
runPartialImportanceRF(dfTrain=dfTrain, dfTest=dfTest, 
                       yVar="month", isContVar=FALSE, 
                       impDB=dfImp, nImp=4, 
                       makePlots=TRUE)

## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 73.014%

## $rfImp
## # A tibble: 4 × 2
##   metric                               imp
##   <chr>                              <dbl>
## 1 pct_soil_temperature_100_to_255cm 31139.
## 2 pct_soil_temperature_28_to_100cm  20356.
## 3 pct_soil_moisture_100_to_255cm    10427.
## 4 pct_soil_temperature_7_to_28cm     9470.
## 
## $rfAcc
## [1] 0.730137

Prediction accuracy is decreased by ~10 percentage points (82.8% to 73.0%) when predictors are limited to 4. A series of models are run, using a variable number of predictors:

# Variable importance number of variables to explore (last entry = all
# predictors in the full month model)
impNums <- c(1:10, 16, 25, nrow(filter(dfImp, src=="month")))

# Accuracy on holdout data for each predictor count; map_dbl() (rather than
# sapply()) guarantees a type-stable double vector
rpiMonth <- tibble::tibble(nImp=impNums, 
                           rfAcc=map_dbl(impNums, 
                                         .f=function(x) runPartialImportanceRF(dfTrain=dfTrain, 
                                                                               yVar="month", 
                                                                               dfTest=dfTest, 
                                                                               isContVar=FALSE, 
                                                                               impDB=dfImp, 
                                                                               nImp=x, 
                                                                               makePlots=FALSE)[["rfAcc"]]
                                         )
                           )
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 42.443%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 75.765%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 73.47%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 72.9%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 80.068%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 81.724%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 82.934%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 81.256%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 82.785%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 82.249%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 82.922%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 83.037%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 83.39%
rpiMonth
## # A tibble: 13 × 2
##     nImp rfAcc
##    <dbl> <dbl>
##  1     1 0.424
##  2     2 0.758
##  3     3 0.735
##  4     4 0.729
##  5     5 0.801
##  6     6 0.817
##  7     7 0.829
##  8     8 0.813
##  9     9 0.828
## 10    10 0.822
## 11    16 0.829
## 12    25 0.830
## 13    32 0.834
# Holdout accuracy as a function of predictor count, anchored at the origin;
# the dashed horizontal line marks the maximum accuracy achieved
rpiMonth %>%
    bind_rows(tibble::tibble(nImp=0, rfAcc=0)) %>%
    ggplot(aes(x=nImp, y=rfAcc)) +
    geom_line() +
    geom_point() +
    geom_hline(data=~filter(., rfAcc==max(rfAcc)), aes(yintercept=rfAcc), linetype=2) +
    lims(y=c(0, 1)) +
    labs(x="# Predictors (selected in order of variable importance in full model)",
         y="Accuracy on holdout data",
         title="Accuracy on holdout data vs. number of predictors",
         subtitle="Predicting month"
         )

# Correlations
# Pairwise correlations (cor() default method) among training predictors,
# reshaped to long form and restricted on both axes to the month model's
# top-8 importance variables, then drawn as an annotated tile map.
# NOTE(review): tibble::tibble(.) mid-pipe works here, but as_tibble() would
# state the intent more directly — confirm before changing
dfTrain %>% 
    select(all_of(varsTrain)) %>% 
    cor() %>% 
    as.data.frame() %>% 
    rownames_to_column("V1") %>% 
    tibble::tibble() %>% 
    pivot_longer(cols=-c(V1), names_to="V2") %>% 
    filter(V1 %in% pull(filter(dfImp, src=="month", n<=8), "metric"), 
           V2 %in% pull(filter(dfImp, src=="month", n<=8), "metric")
           ) %>% 
    ggplot(aes(x=fct_rev(V1), y=V2)) + 
    geom_tile(aes(fill=value)) + 
    geom_text(aes(label=round(value, 2))) + 
    scale_fill_gradient2(high="green") + 
    labs(title="Correlations of select predictors in training data", x=NULL, y=NULL)

Accuracy on holdout data is not monotonically increasing with number of predictors (sorted by original variable importance). Accuracy comes within about one percentage point of its maximum by ~7 predictors, with only marginal gains from adding more (the strict maximum occurs with all 32). Filtering or transforming to account for correlated predictors could be merited.

Temperature is predicted using only the four top importance variables:

# Refit the temperature regression using only the top-4 predictors by importance
runPartialImportanceRF(dfTrain=dfTrain, dfTest=dfTest, 
                       yVar="temperature_2m", isContVar=TRUE, 
                       impDB=dfImp, nImp=4, 
                       makePlots=TRUE, rndTo=-1L, refXY=TRUE)

## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 98.395% (RMSE 1.31 vs. 10.3 null)
## `geom_smooth()` using formula = 'y ~ x'

## $rfImp
## # A tibble: 4 × 2
##   metric                                imp
##   <chr>                               <dbl>
## 1 pct_soil_temperature_0_to_7cm    4222109.
## 2 pct_dewpoint_2m                  1433530.
## 3 pct_soil_temperature_7_to_28cm   2347242.
## 4 pct_soil_temperature_28_to_100cm   58720.
## 
## $rfAcc
##     mseNull     msePred          r2 
## 106.1501502   1.7033478   0.9839534

A series of models are run, using a variable number of predictors:

# Variable importance number of variables to explore (last entry = all
# predictors in the full temperature model)
impNums <- c(1:10, 16, 25, nrow(filter(dfImp, src=="temperature_2m")))

# R-squared on holdout data for each predictor count; map_dbl() (rather than
# sapply()) guarantees a type-stable double vector
rpiTemp <- tibble::tibble(nImp=impNums, 
                          r2=map_dbl(impNums, 
                                     .f=function(x) runPartialImportanceRF(dfTrain=dfTrain, 
                                                                           yVar="temperature_2m", 
                                                                           dfTest=dfTest, 
                                                                           isContVar=TRUE, 
                                                                           impDB=dfImp, 
                                                                           nImp=x, 
                                                                           makePlots=FALSE)[["rfAcc"]]["r2"]
                                     )
                          )
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 96.137% (RMSE 2.02 vs. 10.3 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 98.079% (RMSE 1.43 vs. 10.3 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 98.268% (RMSE 1.36 vs. 10.3 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 98.4% (RMSE 1.3 vs. 10.3 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 98.501% (RMSE 1.26 vs. 10.3 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.823% (RMSE 0.43 vs. 10.3 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.74% (RMSE 0.53 vs. 10.3 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.657% (RMSE 0.6 vs. 10.3 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.799% (RMSE 0.46 vs. 10.3 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.769% (RMSE 0.5 vs. 10.3 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.767% (RMSE 0.5 vs. 10.3 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.488% (RMSE 0.74 vs. 10.3 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.238% (RMSE 0.9 vs. 10.3 null)
rpiTemp
## # A tibble: 13 × 2
##     nImp    r2
##    <dbl> <dbl>
##  1     1 0.961
##  2     2 0.981
##  3     3 0.983
##  4     4 0.984
##  5     5 0.985
##  6     6 0.998
##  7     7 0.997
##  8     8 0.997
##  9     9 0.998
## 10    10 0.998
## 11    16 0.998
## 12    25 0.995
## 13    33 0.992
# Holdout R-squared as a function of predictor count; an anchor row at (0, 0)
# is appended but both the line and point layers filter it out, so only the
# dashed-hline layer (marking the maximum R-squared) sees the full data
rpiTemp %>%
    bind_rows(tibble::tibble(nImp=0, r2=0)) %>%
    ggplot(aes(x=nImp, y=r2)) +
    geom_line(data=~filter(., nImp>0)) +
    geom_point(data=~filter(., nImp>0)) +
    geom_hline(data=~filter(., r2==max(r2)), aes(yintercept=r2), linetype=2) +
    lims(y=c(NA, 1)) +
    labs(x="# Predictors (selected in order of variable importance in full model)",
         y="R-squared on holdout data",
         title="R-squared on holdout data vs. number of predictors",
         subtitle="Predicting temperature"
         )

Soil temperature is predicted using only the four top importance variables:

# Refit the deep-soil-temperature regression using only the top-4 predictors
# by importance
runPartialImportanceRF(dfTrain=dfTrain, dfTest=dfTest, 
                       yVar="soil_temperature_100_to_255cm", isContVar=TRUE, 
                       impDB=dfImp, nImp=4, 
                       makePlots=TRUE, rndTo=-1L, refXY=TRUE)

## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 97.013% (RMSE 1.01 vs. 5.84 null)
## `geom_smooth()` using formula = 'y ~ x'

## $rfImp
## # A tibble: 4 × 2
##   metric                              imp
##   <chr>                             <dbl>
## 1 doy                            1369981.
## 2 month                           956018.
## 3 pct_soil_moisture_100_to_255cm  250564.
## 4 pct_soil_moisture_28_to_100cm    51323.
## 
## $rfAcc
##    mseNull    msePred         r2 
## 34.1488705  1.0199375  0.9701326

A series of models are run, using a variable number of predictors:

# Variable importance number of variables to explore
# The final element, nrow(filter(...)), is the total count of ranked
# predictors for this outcome, i.e. the full model
impNums <- c(1:10, 16, 25, nrow(filter(dfImp, src=="soil_temperature_100_to_255cm")))

# Accuracy on holdout data
# One random forest is fit per element of impNums; only the holdout
# R-squared ("r2" element of "rfAcc") is retained from each fit
rpiSoil <- tibble::tibble(nImp=impNums, 
                          r2=sapply(impNums, 
                                    FUN=function(x) runPartialImportanceRF(dfTrain=dfTrain, 
                                                                           yVar="soil_temperature_100_to_255cm", 
                                                                           dfTest=dfTest, 
                                                                           isContVar=TRUE, 
                                                                           impDB=dfImp, 
                                                                           nImp=x, 
                                                                           makePlots=FALSE)[["rfAcc"]]["r2"]
                                    )
                          )
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 98.804% (RMSE 0.64 vs. 5.84 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 98.372% (RMSE 0.75 vs. 5.84 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 97.553% (RMSE 0.91 vs. 5.84 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 97.021% (RMSE 1.01 vs. 5.84 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 97.137% (RMSE 0.99 vs. 5.84 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 97.205% (RMSE 0.98 vs. 5.84 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 97.233% (RMSE 0.97 vs. 5.84 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 97.29% (RMSE 0.96 vs. 5.84 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 97.312% (RMSE 0.96 vs. 5.84 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 97.369% (RMSE 0.95 vs. 5.84 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 97.419% (RMSE 0.94 vs. 5.84 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 97.524% (RMSE 0.92 vs. 5.84 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 97.588% (RMSE 0.91 vs. 5.84 null)
rpiSoil
## # A tibble: 13 × 2
##     nImp    r2
##    <dbl> <dbl>
##  1     1 0.988
##  2     2 0.984
##  3     3 0.976
##  4     4 0.970
##  5     5 0.971
##  6     6 0.972
##  7     7 0.972
##  8     8 0.973
##  9     9 0.973
## 10    10 0.974
## 11    16 0.974
## 12    25 0.975
## 13    31 0.976
# Plot of holdout accuracy vs. number of variables
# Dummy (0, 0) row anchors the axes; it is filtered out of the drawn layers
rpiSoil %>%
    bind_rows(tibble::tibble(nImp=0, r2=0)) %>%
    ggplot(aes(x=nImp, y=r2)) + 
    geom_line(data=~filter(., nImp>0)) + 
    geom_point(data=~filter(., nImp>0)) + 
    labs(title="R-squared on holdout data vs. number of predictors", 
         subtitle="Predicting deep soil temperature",
         y="R-squared on holdout data", 
         x="# Predictors (selected in order of variable importance in full model)"
         ) + 
    lims(y=c(NA, 1)) + 
    # Dashed line marks the maximum holdout R-squared across all model sizes
    geom_hline(data=~filter(., r2==max(r2)), aes(yintercept=r2), lty=2)

Deep soil temperature is so seasonal that using only the single best predictor (day of year) drives slightly more accurate predictions than models that combine day of year with multiple other features.

The function is tested on variable day of year, as an integer, without access to month:

# Predict day of year as a continuous integer. Predictors exclude anything
# containing "doy" (to avoid leakage) but add "tod"; month remains available
# via varsTrain only if present there — NOTE(review): output below suggests
# month is excluded as stated in the surrounding text; confirm varsTrain
rfDOYInt <- runFullRF(dfTrain=dfTrain, 
                      yVar="doy", 
                      xVars=c(varsTrain[!str_detect(varsTrain, "doy")], "tod"), 
                      dfTest=dfTest, 
                      useLabel=keyLabel, 
                      useSub=stringr::str_to_sentence(keyLabel), 
                      isContVar=TRUE,
                      rndTo=1,          # round predictions to 1 decimal place
                      refXY=TRUE,
                      returnData=TRUE   # keep tstPred for the per-month error table below
                      )

## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 80.752% (RMSE 46.23 vs. 105.37 null)
## `geom_smooth()` using formula = 'y ~ x'

# Holdout RMSE of the day-of-year predictions, broken out by calendar month.
# `pred` is the model prediction and `doy` the actual day of year in the
# holdout prediction table returned by runFullRF(); n is the row count.
# The original used the undocumented `**` operator twice; sqrt() with the
# documented `^` operator is the idiomatic form and evaluates identically.
rfDOYInt$tstPred %>%
    group_by(month) %>%
    summarize(doyrmse=sqrt(mean((pred-doy)^2)), n=n(), .groups="drop")
## # A tibble: 12 × 3
##    month doyrmse     n
##    <fct>   <dbl> <int>
##  1 Jan    146.     744
##  2 Feb      8.54   672
##  3 Mar     10.5    744
##  4 Apr      4.69   720
##  5 May      5.34   744
##  6 Jun      4.20   720
##  7 Jul      3.72   744
##  8 Aug      4.38   744
##  9 Sep      6.67   720
## 10 Oct      6.23   744
## 11 Nov      6.68   720
## 12 Dec     59.3    744

Not surprisingly, predictions for winter have high error, as there is little difference between day 360 and day 5. Predictions outside of winter are generally within 5-10 days, based primarily on soil characteristics.

The function is tested on variable day of year, rounded to nearest 10 days and converted to factor, without access to month:

# Predict day of year as a classification problem: doy is rounded to the
# nearest 10 days and converted to a factor. Day 366 is dropped (doy<=365)
# so train and test share the same factor levels
rfDOYFct <- runFullRF(dfTrain=dfTrain %>% filter(doy<=365) %>% mutate(doy=factor(10*round(doy/10))), 
                      yVar="doy", 
                      xVars=c(varsTrain[!str_detect(varsTrain, "doy")], "tod"), 
                      dfTest=dfTest %>% filter(doy<=365) %>% mutate(doy=factor(10*round(doy/10))), 
                      useLabel=keyLabel, 
                      useSub=stringr::str_to_sentence(keyLabel), 
                      isContVar=FALSE,
                      rndTo=NULL,
                      returnData=TRUE
                      )

## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 47.648%

# Per-month accuracy. doyacc = exact 10-day bin match; doy10acc counts
# predictions within one bin (abs difference 0 or 10), with 360 included to
# treat the day-0 vs day-360 year wraparound as adjacent
rfDOYFct$tstPred %>%
    group_by(month) %>%
    summarize(doyacc=mean(doy==pred), 
              doy10acc=mean(abs(as.integer(as.character(doy))-as.integer(as.character(pred))) %in% c(0, 10, 360)),
              n=n(), 
              .groups="drop"
              )
## # A tibble: 12 × 4
##    month doyacc doy10acc     n
##    <fct>  <dbl>    <dbl> <int>
##  1 Jan    0.472    0.849   744
##  2 Feb    0.278    0.815   672
##  3 Mar    0.140    0.551   744
##  4 Apr    0.492    0.964   720
##  5 May    0.535    1       744
##  6 Jun    0.622    1       720
##  7 Jul    0.526    0.996   744
##  8 Aug    0.367    0.958   744
##  9 Sep    0.328    0.817   720
## 10 Oct    0.585    0.964   744
## 11 Nov    0.746    1       720
## 12 Dec    0.618    0.993   744

Soil temperature and moisture patterns generally make day of year predictions accurate to within 10-20 days

Day of year is predicted using only the four top importance variables:

# Combine the variable-importance tables from the four fitted models into one
# long tibble; .id="src" records which model each row came from. Within each
# model: pct = share of total importance, cspct = cumulative share after
# sorting by descending importance, n = importance rank
dfImp <- map_dfr(list("month"=rfMonth, 
                      "temperature_2m"=rfTemp2m, 
                      "soil_temperature_100_to_255cm"=rfSoil255, 
                      "doy"=rfDOYFct
                      ), 
                 .f=function(x) x$rfImp, 
                 .id="src"
                 ) %>% 
    arrange(src, -imp) %>% 
    group_by(src) %>%
    mutate(pct=imp/sum(imp), cspct=cumsum(pct), n=row_number()) %>%
    ungroup()
dfImp
## # A tibble: 129 × 6
##    src   metric                               imp    pct cspct     n
##    <chr> <chr>                              <dbl>  <dbl> <dbl> <int>
##  1 doy   pct_soil_temperature_100_to_255cm 15156. 0.198  0.198     1
##  2 doy   pct_soil_temperature_28_to_100cm  10393. 0.136  0.333     2
##  3 doy   pct_soil_moisture_100_to_255cm     7681. 0.100  0.434     3
##  4 doy   pct_soil_moisture_28_to_100cm      6656. 0.0869 0.521     4
##  5 doy   pct_soil_temperature_7_to_28cm     4736. 0.0618 0.582     5
##  6 doy   pct_soil_moisture_7_to_28cm        4630. 0.0604 0.643     6
##  7 doy   pct_soil_moisture_0_to_7cm         3307. 0.0432 0.686     7
##  8 doy   pct_pressure_msl                   2171. 0.0283 0.714     8
##  9 doy   pct_surface_pressure               2165. 0.0283 0.743     9
## 10 doy   pct_dewpoint_2m                    1987. 0.0259 0.768    10
## # ℹ 119 more rows
# Re-fit the factor day-of-year model using only the 4 top-importance
# predictors; returnElem also keeps tstPred for the per-month table below
rfDOYpi <- runPartialImportanceRF(dfTrain=dfTrain %>% filter(doy<=365) %>% mutate(doy=factor(10*round(doy/10))), 
                                  yVar="doy", 
                                  dfTest=dfTest %>% filter(doy<=365) %>% mutate(doy=factor(10*round(doy/10))), 
                                  isContVar=FALSE,
                                  impDB=dfImp,
                                  nImp=4, 
                                  makePlots=TRUE, 
                                  returnElem=c("rfImp", "rfAcc", "tstPred")
                                  )

## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 42.534%

# Per-month accuracy; doy10acc counts predictions within one 10-day bin,
# with 360 handling the day-0 vs day-360 year wraparound
rfDOYpi$tstPred %>%
    group_by(month) %>%
    summarize(doyacc=mean(doy==pred), 
              doy10acc=mean(abs(as.integer(as.character(doy))-as.integer(as.character(pred))) %in% c(0, 10, 360)),
              n=n(), 
              .groups="drop"
              )
## # A tibble: 12 × 4
##    month doyacc doy10acc     n
##    <fct>  <dbl>    <dbl> <int>
##  1 Jan   0.284     0.800   744
##  2 Feb   0.223     1       672
##  3 Mar   0.0927    0.364   744
##  4 Apr   0.647     0.928   720
##  5 May   0.781     1       744
##  6 Jun   0.583     1       720
##  7 Jul   0.446     1       744
##  8 Aug   0.0941    0.546   744
##  9 Sep   0.275     0.847   720
## 10 Oct   0.667     1       744
## 11 Nov   0.585     1       720
## 12 Dec   0.419     1       744

Restricting to the top 4 predictors still drives most predictions to within 10-20 days

A series of models are run, using a variable number of predictors:

# Variable importance number of variables to explore
# (the final element is the full predictor count for the "doy" model)
impNums <- c(1:10, 16, 25, nrow(filter(dfImp, src=="doy")))

# Accuracy on holdout data
# One classification forest per element of impNums; the scalar holdout
# accuracy ("rfAcc") is retained from each fit
rpiDOY <- tibble::tibble(nImp=impNums, 
                         rfAcc=sapply(impNums, 
                                      FUN=function(x) runPartialImportanceRF(dfTrain=dfTrain %>% 
                                                                                 filter(doy<=365) %>%
                                                                                 mutate(doy=factor(10*round(doy/10))),
                                                                             yVar="doy", 
                                                                             dfTest=dfTest %>% 
                                                                                 filter(doy<=365) %>%
                                                                                 mutate(doy=factor(10*round(doy/10))), 
                                                                             isContVar=FALSE, 
                                                                             impDB=dfImp, 
                                                                             nImp=x, 
                                                                             makePlots=FALSE
                                                                             )[["rfAcc"]]
                                      )
                         )
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 27.043%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 42.226%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 43.836%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 44.007%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 45.365%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 45.091%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 46.221%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 47.032%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 45.651%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 47.854%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 48.082%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 48.116%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 48.927%
rpiDOY
## # A tibble: 13 × 2
##     nImp rfAcc
##    <dbl> <dbl>
##  1     1 0.270
##  2     2 0.422
##  3     3 0.438
##  4     4 0.440
##  5     5 0.454
##  6     6 0.451
##  7     7 0.462
##  8     8 0.470
##  9     9 0.457
## 10    10 0.479
## 11    16 0.481
## 12    25 0.481
## 13    33 0.489
# Plot of holdout accuracy vs. number of variables
# NOTE: unlike the earlier R-squared plots, the dummy (0, 0) anchor row is
# NOT filtered out here, so the drawn line starts at the origin
rpiDOY %>%
    bind_rows(tibble::tibble(nImp=0, rfAcc=0)) %>%
    ggplot(aes(x=nImp, y=rfAcc)) + 
    geom_line() + 
    geom_point() + 
    labs(title="Accuracy on holdout data vs. number of predictors", 
         subtitle="Predicting day of year, rounded to nearest 10, as factor",
         y="Accuracy on holdout data", 
         x="# Predictors (selected in order of variable importance in full model)"
         ) + 
    lims(y=c(0, 1)) + 
    # Dashed line at the best holdout accuracy across all model sizes
    geom_hline(data=~filter(., rfAcc==max(rfAcc)), aes(yintercept=rfAcc), lty=2)

Two soil temperature metrics (1.00m-2.55m and 0.28m-1.00m) on their own drive accuracy nearly as high as including all 33 predictors.

Soil metrics are especially predictive of time of year:

# Stack the train and test periods back together for exploratory plots,
# ordered chronologically, with day of year derived from the timestamp
dfPlot <- nycTempTrain %>%
    bind_rows(nycTempTest) %>%
    arrange(time) %>%
    mutate(doy=lubridate::yday(time))

# Scatter of the two deep soil temperature readings, colored by month,
# point size = count of hours with that exact reading pair
dfPlot %>%
    count(month, soil_temperature_28_to_100cm, soil_temperature_100_to_255cm) %>%
    ggplot(aes(x=soil_temperature_28_to_100cm, y=soil_temperature_100_to_255cm)) + 
    geom_point(aes(color=month, size=n)) + 
    labs(title="Soil temperature readings by month and year")

# Same scatter, one panel per year
dfPlot %>%
    count(year, month, soil_temperature_28_to_100cm, soil_temperature_100_to_255cm) %>%
    ggplot(aes(x=soil_temperature_28_to_100cm, y=soil_temperature_100_to_255cm)) + 
    geom_point(aes(color=month, size=n)) + 
    facet_wrap(~year) + 
    labs(title="Soil temperature readings by month and year")

# Same scatter, one panel per month, colored by year
dfPlot %>%
    count(year, month, soil_temperature_28_to_100cm, soil_temperature_100_to_255cm) %>%
    ggplot(aes(x=soil_temperature_28_to_100cm, y=soil_temperature_100_to_255cm)) + 
    geom_point(aes(color=factor(year), size=n)) + 
    facet_wrap(~month) + 
    labs(title="Soil temperature readings by month and year") + 
    scale_color_discrete(NULL)

# Daily mean and sd of each soil temperature series, one line per year,
# faceted by metric (pivot_longer yields one metric_stat column name per panel)
dfPlot %>%
    group_by(doy, year) %>%
    summarize(across(.cols=c(soil_temperature_28_to_100cm, soil_temperature_100_to_255cm), 
                     .fns=list(mean=mean, sd=sd)
                     ), 
              .groups="drop"
              ) %>%
    pivot_longer(cols=-c(year, doy)) %>%
    ggplot(aes(x=doy, y=value)) + 
    geom_line(aes(group=year, color=factor(year))) + 
    facet_wrap(~name) + 
    scale_color_discrete(NULL) + 
    labs(title="Mean and sd for hourly soil temperature readings by day of year", x="Day of Year", y=NULL)

Deep soil temperatures are stable over extended time periods and display a repeatable, highly seasonal pattern. This makes them ideal predictors for metrics such as month or day of year

In contrast, air temperature and dewpoint show more intraday variability and less association with day of year:

# Round air temperature and dewpoint to the nearest degree so count()-based
# scatter plots have a manageable number of distinct reading pairs
dfPlotTemp <- dfPlot %>%
    mutate(across(.cols=c(temperature_2m, dewpoint_2m), .fns=round))

# Temperature vs. dewpoint scatter, colored by month, size = hourly count
dfPlotTemp %>%
    count(month, temperature_2m, dewpoint_2m) %>%
    ggplot(aes(x=temperature_2m, y=dewpoint_2m)) + 
    geom_point(aes(color=month, size=n)) + 
    labs(title="Temperature and dewpoint readings by month and year", 
         subtitle="Readings taken hourly, rounded to nearest 1 degree C"
         )

# Same scatter, faceted by month and colored by year
dfPlotTemp %>%
    count(year, month, temperature_2m, dewpoint_2m) %>%
    ggplot(aes(x=temperature_2m, y=dewpoint_2m)) + 
    geom_point(aes(color=factor(year), size=n)) + 
    facet_wrap(~month) + 
    labs(title="Temperature and dewpoint readings by month and year", 
         subtitle="Readings taken hourly, rounded to nearest 1 degree C"
         ) +
    scale_color_discrete(NULL)

# Daily mean and sd of air temperature and dewpoint, one line per year
dfPlotTemp %>%
    group_by(doy, year) %>%
    summarize(across(.cols=c(temperature_2m, dewpoint_2m), 
                     .fns=list(mean=mean, sd=sd)
                     ), 
              .groups="drop"
              ) %>%
    pivot_longer(cols=-c(year, doy)) %>%
    ggplot(aes(x=doy, y=value)) + 
    geom_line(aes(group=year, color=factor(year))) + 
    facet_wrap(~name) + 
    scale_color_discrete(NULL) + 
    labs(title="Mean and sd for hourly air temperature and dewpoint readings by day of year", 
         x="Day of Year", 
         y=NULL, 
         subtitle="Rounded to nearest 1 degree C"
         )

# Overall mean and sd of the per-day statistics, one row per metric_stat pair
dfPlotTemp %>%
    group_by(doy, year) %>%
    summarize(across(.cols=c(temperature_2m, dewpoint_2m), 
                     .fns=list(mean=mean, sd=sd)
                     ), 
              .groups="drop"
              ) %>%
    pivot_longer(cols=-c(year, doy)) %>%
    group_by(name) %>%
    summarize(dailyMean=mean(value), dailySD=sd(value))
## # A tibble: 4 × 3
##   name                dailyMean dailySD
##   <chr>                   <dbl>   <dbl>
## 1 dewpoint_2m_mean         6.56   10.1 
## 2 dewpoint_2m_sd           1.98    1.26
## 3 temperature_2m_mean     12.1     9.66
## 4 temperature_2m_sd        2.76    1.11

Soil moisture metrics are also explored:

# Soil moisture analogue of the soil temperature scatter: one point per
# distinct reading pair, colored by month, size = hourly count
dfPlot %>%
    count(month, soil_moisture_28_to_100cm, soil_moisture_100_to_255cm) %>%
    ggplot(aes(x=soil_moisture_28_to_100cm, y=soil_moisture_100_to_255cm)) + 
    geom_point(aes(color=month, size=n)) + 
    labs(title="Soil moisture readings by month and year")

# Same scatter, one panel per year
dfPlot %>%
    count(year, month, soil_moisture_28_to_100cm, soil_moisture_100_to_255cm) %>%
    ggplot(aes(x=soil_moisture_28_to_100cm, y=soil_moisture_100_to_255cm)) + 
    geom_point(aes(color=month, size=n)) + 
    facet_wrap(~year) + 
    labs(title="Soil moisture readings by month and year")

# Same scatter, one panel per month, colored by year
dfPlot %>%
    count(year, month, soil_moisture_28_to_100cm, soil_moisture_100_to_255cm) %>%
    ggplot(aes(x=soil_moisture_28_to_100cm, y=soil_moisture_100_to_255cm)) + 
    geom_point(aes(color=factor(year), size=n)) + 
    facet_wrap(~month) + 
    labs(title="Soil moisture readings by month and year") + 
    scale_color_discrete(NULL)

# Daily mean and sd of each soil moisture series, one line per year
dfPlot %>%
    group_by(doy, year) %>%
    summarize(across(.cols=c(soil_moisture_28_to_100cm, soil_moisture_100_to_255cm), 
                     .fns=list(mean=mean, sd=sd)
                     ), 
              .groups="drop"
              ) %>%
    pivot_longer(cols=-c(year, doy)) %>%
    ggplot(aes(x=doy, y=value)) + 
    geom_line(aes(group=year, color=factor(year))) + 
    facet_wrap(~name) + 
    scale_color_discrete(NULL) + 
    labs(title="Mean and sd for hourly soil moisture readings by day of year", x="Day of Year", y=NULL)

Deep soil moisture is less reproducibly seasonal, making it a less effective predictor of time of year

The function is tested on categorical variable year:

# Create holdout data and labels
# Unlike the earlier pre-2022/2022 split, this split keeps all years < 2023
# in BOTH sets; the target is year itself, converted to a factor
dfTrain_v2 <- filter(nycTempTrain, lubridate::year(date)<2023) %>% mutate(doy=yday(date), fct_year=factor(year))
dfTest_v2 <- filter(nycTempTest, lubridate::year(date)<2023) %>% mutate(doy=yday(date), fct_year=factor(year))
keyLabel_v2 <- "predictions based on training data applied to holdout dataset"

# Classification forest predicting the (factor) year from varsTrain
rfYear <- runFullRF(dfTrain=dfTrain_v2, 
                     yVar="fct_year", 
                     xVars=varsTrain, 
                     dfTest=dfTest_v2, 
                     useLabel=keyLabel_v2, 
                     useSub=stringr::str_to_sentence(keyLabel_v2), 
                     returnData=TRUE
                     )

## 
## Accuracy of predictions based on training data applied to holdout dataset is: 99.986%

# Rebuild the combined importance table, now also including the fct_year
# model, so downstream nImp sweeps can look up its ranked predictors.
# pct/cspct/n are recomputed per source model as before
dfImp <- map_dfr(list("month"=rfMonth, 
                      "temperature_2m"=rfTemp2m, 
                      "soil_temperature_100_to_255cm"=rfSoil255, 
                      "doy"=rfDOYFct,
                      "fct_year"=rfYear
                      ), 
                 .f=function(x) x$rfImp, 
                 .id="src"
                 ) %>% 
    arrange(src, -imp) %>% 
    group_by(src) %>%
    mutate(pct=imp/sum(imp), cspct=cumsum(pct), n=row_number()) %>%
    ungroup()
dfImp
## # A tibble: 161 × 6
##    src   metric                               imp    pct cspct     n
##    <chr> <chr>                              <dbl>  <dbl> <dbl> <int>
##  1 doy   pct_soil_temperature_100_to_255cm 15156. 0.198  0.198     1
##  2 doy   pct_soil_temperature_28_to_100cm  10393. 0.136  0.333     2
##  3 doy   pct_soil_moisture_100_to_255cm     7681. 0.100  0.434     3
##  4 doy   pct_soil_moisture_28_to_100cm      6656. 0.0869 0.521     4
##  5 doy   pct_soil_temperature_7_to_28cm     4736. 0.0618 0.582     5
##  6 doy   pct_soil_moisture_7_to_28cm        4630. 0.0604 0.643     6
##  7 doy   pct_soil_moisture_0_to_7cm         3307. 0.0432 0.686     7
##  8 doy   pct_pressure_msl                   2171. 0.0283 0.714     8
##  9 doy   pct_surface_pressure               2165. 0.0283 0.743     9
## 10 doy   pct_dewpoint_2m                    1987. 0.0259 0.768    10
## # ℹ 151 more rows

There is both daily stability and annual variation in the combined metrics, such that seeing ~70% of the hourly observations for each day is sufficient to determine the year.

A series of models are run, using a variable number of predictors:

# Variable importance number of variables to explore
# (the final element is the full predictor count for the "fct_year" model)
impNums <- c(1:10, 16, 25, nrow(filter(dfImp, src=="fct_year")))

# Accuracy on holdout data
# One classification forest per model size; scalar holdout accuracy retained
rpiYear <- tibble::tibble(nImp=impNums, 
                          rfAcc=sapply(impNums, 
                                       FUN=function(x) runPartialImportanceRF(dfTrain=dfTrain_v2,
                                                                              yVar="fct_year", 
                                                                              dfTest=dfTest_v2, 
                                                                              isContVar=FALSE, 
                                                                              impDB=dfImp, 
                                                                              nImp=x, 
                                                                              makePlots=FALSE
                                                                              )[["rfAcc"]]
                                       )
                          )
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 27.196%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 76.684%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 98.515%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.716%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.937%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.972%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.972%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.982%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.982%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.979%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.986%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.989%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.982%
rpiYear
## # A tibble: 13 × 2
##     nImp rfAcc
##    <dbl> <dbl>
##  1     1 0.272
##  2     2 0.767
##  3     3 0.985
##  4     4 0.997
##  5     5 0.999
##  6     6 1.00 
##  7     7 1.00 
##  8     8 1.00 
##  9     9 1.00 
## 10    10 1.00 
## 11    16 1.00 
## 12    25 1.00 
## 13    32 1.00
# Plot of holdout accuracy vs. number of variables
# The (0, 0) anchor row is included in the drawn line, as in the rpiDOY plot
rpiYear %>%
    bind_rows(tibble::tibble(nImp=0, rfAcc=0)) %>%
    ggplot(aes(x=nImp, y=rfAcc)) + 
    geom_line() + 
    geom_point() + 
    labs(title="Accuracy on holdout data vs. number of predictors", 
         subtitle="Predicting year as factor",
         y="Accuracy on holdout data", 
         x="# Predictors (selected in order of variable importance in full model)"
         ) + 
    lims(y=c(0, 1)) + 
    # Dashed line at the best holdout accuracy across model sizes
    geom_hline(data=~filter(., rfAcc==max(rfAcc)), aes(yintercept=rfAcc), lty=2)

The function is tested on categorical variable hour:

# Create holdout data and labels
# Back to a temporal split: train on years before 2022, hold out 2022.
# The target fct_hour is assumed to already exist upstream — confirm in _v002
dfTrain_v3 <- filter(nycTempTrain, lubridate::year(date)<2022) %>% mutate(doy=yday(date))
dfTest_v3 <- filter(nycTempTest, lubridate::year(date)==2022) %>% mutate(doy=yday(date))
keyLabel_v3 <- "predictions based on training data applied to holdout dataset"

# Classification forest predicting hour; month and doy are explicitly added
# to the predictor set here (no leakage concern for an hour target)
rfHour <- runFullRF(dfTrain=dfTrain_v3, 
                     yVar="fct_hour", 
                     xVars=c(varsTrain, "month", "doy"), 
                     dfTest=dfTest_v3, 
                     useLabel=keyLabel_v3, 
                     useSub=stringr::str_to_sentence(keyLabel_v3), 
                     returnData=TRUE
                     )
## Growing trees.. Progress: 92%. Estimated remaining time: 2 seconds.

## 
## Accuracy of predictions based on training data applied to holdout dataset is: 41.281%

# Rebuild the combined importance table once more, now including the
# fct_hour model alongside all previously fitted models
dfImp <- map_dfr(list("month"=rfMonth, 
                      "temperature_2m"=rfTemp2m, 
                      "soil_temperature_100_to_255cm"=rfSoil255, 
                      "doy"=rfDOYFct,
                      "fct_year"=rfYear, 
                      "fct_hour"=rfHour
                      ), 
                 .f=function(x) x$rfImp, 
                 .id="src"
                 ) %>% 
    arrange(src, -imp) %>% 
    group_by(src) %>%
    mutate(pct=imp/sum(imp), cspct=cumsum(pct), n=row_number()) %>%
    ungroup()
dfImp
## # A tibble: 195 × 6
##    src   metric                               imp    pct cspct     n
##    <chr> <chr>                              <dbl>  <dbl> <dbl> <int>
##  1 doy   pct_soil_temperature_100_to_255cm 15156. 0.198  0.198     1
##  2 doy   pct_soil_temperature_28_to_100cm  10393. 0.136  0.333     2
##  3 doy   pct_soil_moisture_100_to_255cm     7681. 0.100  0.434     3
##  4 doy   pct_soil_moisture_28_to_100cm      6656. 0.0869 0.521     4
##  5 doy   pct_soil_temperature_7_to_28cm     4736. 0.0618 0.582     5
##  6 doy   pct_soil_moisture_7_to_28cm        4630. 0.0604 0.643     6
##  7 doy   pct_soil_moisture_0_to_7cm         3307. 0.0432 0.686     7
##  8 doy   pct_pressure_msl                   2171. 0.0283 0.714     8
##  9 doy   pct_surface_pressure               2165. 0.0283 0.743     9
## 10 doy   pct_dewpoint_2m                    1987. 0.0259 0.768    10
## # ℹ 185 more rows

Hour (as factor) is predicted using only the four top importance variables:

# Re-fit the hour classifier using only the 4 top-importance predictors
# for the fct_hour model recorded in dfImp
runPartialImportanceRF(dfTrain=dfTrain_v3, 
                       yVar="fct_hour", 
                       dfTest=dfTest_v3, 
                       isContVar=FALSE,  # categorical outcome: report accuracy
                       impDB=dfImp,
                       nImp=4, 
                       makePlots=TRUE
                       )

## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 37.027%

## $rfImp
## # A tibble: 4 × 2
##   metric                        imp
##   <chr>                       <dbl>
## 1 pct_diffuse_radiation       8767.
## 2 pct_shortwave_radiation     8763.
## 3 pct_vapor_pressure_deficit  8667.
## 4 doy                        15474.
## 
## $rfAcc
## [1] 0.3702665

Prediction accuracy is decreased by ~5% as predictors are limited to 4, with some of the nighttime hours never predicted. A series of models are run, using a variable number of predictors:

# Variable importance number of variables to explore
# (the final element is the full predictor count for the "fct_hour" model)
impNums <- c(1:10, 16, 25, nrow(filter(dfImp, src=="fct_hour")))

# Accuracy on holdout data
# One classification forest per model size; scalar holdout accuracy retained
rpiHour <- tibble::tibble(nImp=impNums, 
                          rfAcc=sapply(impNums, 
                                       FUN=function(x) runPartialImportanceRF(dfTrain=dfTrain_v3, 
                                                                              yVar="fct_hour", 
                                                                              dfTest=dfTest_v3, 
                                                                              isContVar=FALSE, 
                                                                              impDB=dfImp, 
                                                                              nImp=x, 
                                                                              makePlots=FALSE)[["rfAcc"]]
                                       )
                          )
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 11.501%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 17.485%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 20.196%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 37.26%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 37.681%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 38.897%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 38.569%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 38.523%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 40.673%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 38.429%
## Growing trees.. Progress: 97%. Estimated remaining time: 1 seconds.
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 39.224%
## Growing trees.. Progress: 87%. Estimated remaining time: 4 seconds.
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 40.58%
## Growing trees.. Progress: 86%. Estimated remaining time: 4 seconds.
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 42.169%
rpiHour
## # A tibble: 13 × 2
##     nImp rfAcc
##    <dbl> <dbl>
##  1     1 0.115
##  2     2 0.175
##  3     3 0.202
##  4     4 0.373
##  5     5 0.377
##  6     6 0.389
##  7     7 0.386
##  8     8 0.385
##  9     9 0.407
## 10    10 0.384
## 11    16 0.392
## 12    25 0.406
## 13    34 0.422
# Plot of holdout accuracy vs. number of variables
# The (0, 0) anchor row is included in the drawn line, as in the other
# accuracy plots
rpiHour %>%
    bind_rows(tibble::tibble(nImp=0, rfAcc=0)) %>%
    ggplot(aes(x=nImp, y=rfAcc)) + 
    geom_line() + 
    geom_point() + 
    labs(title="Accuracy on holdout data vs. number of predictors", 
         subtitle="Predicting hour",
         y="Accuracy on holdout data", 
         x="# Predictors (selected in order of variable importance in full model)"
         ) + 
    lims(y=c(0, 1)) + 
    # Dashed line at the best holdout accuracy across model sizes
    geom_hline(data=~filter(., rfAcc==max(rfAcc)), aes(yintercept=rfAcc), lty=2)

# Correlation heatmap of the top-8 fct_hour predictors in the training data
dfTrain %>% 
    select(all_of(varsTrain)) %>% 
    cor() %>% 
    tibble::as_tibble(rownames="V1") %>% 
    pivot_longer(cols=-V1, names_to="V2") %>% 
    filter(V1 %in% dfImp$metric[dfImp$src=="fct_hour" & dfImp$n<=8], 
           V2 %in% dfImp$metric[dfImp$src=="fct_hour" & dfImp$n<=8]
           ) %>% 
    ggplot(aes(x=fct_rev(V1), y=V2)) + 
    geom_tile(aes(fill=value)) + 
    geom_text(aes(label=round(value, 2))) + 
    scale_fill_gradient2(high="green") + 
    labs(title="Correlations of select predictors in training data", x=NULL, y=NULL)

Accuracy generally increases with the number of predictors, plateauing around 40% once 4 or more predictors are included.

The process for running partial importance is converted to functional form:

autoPartialImportance <- function(dfTrain, 
                                  dfTest, 
                                  yVar, 
                                  isContVar,
                                  impDB=dfImp,
                                  # NOTE: default sized from impDB (not the global dfImp), so a
                                  # caller-supplied importance table is respected
                                  impNums=c(1:10, 16, 25, nrow(filter(impDB, src==yVar)))
                                  ) {
    
    # Run runPartialImportanceRF() for each candidate number of top-importance
    # predictors, collect holdout accuracy (or R-squared), and plot the curve.
    #
    # FUNCTION ARGUMENTS:
    # dfTrain: training data
    # dfTest: test (holdout) data
    # yVar: dependent variable
    # isContVar: boolean, is this a continuous variable (R-squared) or categorical variable (accuracy)?
    # impDB: tibble containing sorted variable importances by predictor
    # impNums: vector of number of variables to run (each element in vector run);
    #          defaults to 1-10, 16, 25, and all predictors available for yVar in impDB
    #
    # RETURNS: tibble with one row per element of impNums (nImp) and the
    #          corresponding holdout metric (rfAcc); also prints the tibble and plot
    
    # Accuracy/R-squared on holdout data
    # vapply (rather than sapply) guarantees a plain numeric vector of the right length
    tblRPI <- tibble::tibble(nImp=impNums, 
                             rfAcc=vapply(impNums, 
                                          FUN=function(x) {y <- runPartialImportanceRF(dfTrain=dfTrain, 
                                                                                       yVar=yVar, 
                                                                                       dfTest=dfTest, 
                                                                                       isContVar=isContVar, 
                                                                                       impDB=impDB, 
                                                                                       nImp=x, 
                                                                                       makePlots=FALSE
                                                                                       )[["rfAcc"]]
                                                           # Continuous models return a named vector; keep R-squared only
                                                           # ([["r2"]] drops the name so the column stays clean)
                                                           if(isTRUE(isContVar)) y <- y[["r2"]]
                                                           y
                                                           }, 
                                          FUN.VALUE=numeric(1)
                                          )
                             )
    print(tblRPI)

    # Plot of holdout accuracy/r-squared vs. number of variables
    if(isTRUE(isContVar)) prtDesc <- "R-squared" else prtDesc <- "Accuracy"
    p1 <- tblRPI %>%
        select(nImp, rfAcc) %>%
        bind_rows(tibble::tibble(nImp=0, rfAcc=0)) %>%
        ggplot(aes(x=nImp, y=rfAcc)) + 
        geom_line() + 
        geom_point() + 
        labs(title=paste0(prtDesc, " on holdout data vs. number of predictors"), 
             subtitle=paste0("Predicting ", yVar),
             y=paste0(prtDesc, " on holdout data"), 
             x="# Predictors (selected in order of variable importance in full model)"
             ) + 
        lims(y=c(0, 1)) + 
        # Dashed reference line at the best metric achieved
        geom_hline(data=~filter(., rfAcc==max(rfAcc)), aes(yintercept=rfAcc), lty=2)
    print(p1)
    
    return(tblRPI)
    
}

The function is tested on hour, as factor:

apiHour <- autoPartialImportance(dfTrain=dfTrain_v3, dfTest=dfTest_v3, yVar="fct_hour", isContVar=FALSE)
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 11.267%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 17.251%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 20.103%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 36.886%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 38.616%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 39.177%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 38.242%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 37.775%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 40.065%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 38.476%
## Growing trees.. Progress: 100%. Estimated remaining time: 0 seconds.
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 38.476%
## Growing trees.. Progress: 57%. Estimated remaining time: 23 seconds.
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 40.72%
## Growing trees.. Progress: 64%. Estimated remaining time: 17 seconds.
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 41.842%
## # A tibble: 13 × 2
##     nImp rfAcc
##    <dbl> <dbl>
##  1     1 0.113
##  2     2 0.173
##  3     3 0.201
##  4     4 0.369
##  5     5 0.386
##  6     6 0.392
##  7     7 0.382
##  8     8 0.378
##  9     9 0.401
## 10    10 0.385
## 11    16 0.385
## 12    25 0.407
## 13    34 0.418

# Overlay the functionalized results (api) on the earlier manual results (rpi)
# for hour-as-factor, to confirm they broadly agree
apiHour %>%
    colRenamer(c("rfAcc"="apiAcc")) %>%
    full_join(rpiHour, by="nImp") %>%
    colRenamer(c("rfAcc"="rpiAcc")) %>%
    pivot_longer(cols=-nImp) %>%
    ggplot(aes(x=nImp, y=value, color=name)) + 
    geom_point() + 
    geom_line(aes(group=name)) + 
    labs(title="Accuracy on holdout data vs. number of predictors", 
         subtitle="Predicting hour as factor using function api and previous results rpi",
         y="Accuracy on holdout data", 
         x="# Predictors (selected in order of variable importance in full model)"
         ) + 
    lims(y=c(0, 1)) + 
    scale_color_discrete(NULL)

While there are minor differences due to different random states, the function broadly gives the same results as the previous code.

The function is tested on temperature:

apiTemp <- autoPartialImportance(dfTrain=dfTrain, dfTest=dfTest, yVar="temperature_2m", isContVar=TRUE)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 96.135% (RMSE 2.03 vs. 10.3 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 98.081% (RMSE 1.43 vs. 10.3 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 98.266% (RMSE 1.36 vs. 10.3 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 98.397% (RMSE 1.3 vs. 10.3 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 98.498% (RMSE 1.26 vs. 10.3 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.822% (RMSE 0.43 vs. 10.3 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.743% (RMSE 0.52 vs. 10.3 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.663% (RMSE 0.6 vs. 10.3 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.79% (RMSE 0.47 vs. 10.3 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.77% (RMSE 0.49 vs. 10.3 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.766% (RMSE 0.5 vs. 10.3 null)
## Growing trees.. Progress: 100%. Estimated remaining time: 0 seconds.
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.465% (RMSE 0.75 vs. 10.3 null)
## 
## R-squared of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 99.259% (RMSE 0.89 vs. 10.3 null)
## # A tibble: 13 × 2
##     nImp rfAcc
##    <dbl> <dbl>
##  1     1 0.961
##  2     2 0.981
##  3     3 0.983
##  4     4 0.984
##  5     5 0.985
##  6     6 0.998
##  7     7 0.997
##  8     8 0.997
##  9     9 0.998
## 10    10 0.998
## 11    16 0.998
## 12    25 0.995
## 13    33 0.993

# Overlay the functionalized results (api) on the earlier manual results (rpi)
# for temperature, to confirm they broadly agree
apiTemp %>%
    colRenamer(c("rfAcc"="apiR2")) %>%
    full_join(rpiTemp, by="nImp") %>%
    colRenamer(c("r2"="rpiR2")) %>%
    pivot_longer(cols=-nImp) %>%
    ggplot(aes(x=nImp, y=value, color=name)) + 
    geom_point() + 
    geom_line(aes(group=name)) + 
    labs(title="R-squared on holdout data vs. number of predictors", 
         subtitle="Predicting temperature using function api and previous results rpi",
         y="R-squared on holdout data", 
         x="# Predictors (selected in order of variable importance in full model)"
         ) + 
    lims(y=c(0, 1)) + 
    scale_color_discrete(NULL)

While there are minor differences due to different random states, the function broadly gives the same results as the previous code.

The function is tested on categorical variable todSeason (a mix of day/night and the four seasons):

# Fit a full random forest predicting todSeason (a mix of day/night and the
# four seasons) from all training predictors, scoring on the v3 holdout set
rfTODS <- runFullRF(dfTrain=dfTrain_v3, 
                    yVar="todSeason", 
                    xVars=c(varsTrain), 
                    dfTest=dfTest_v3, 
                    useLabel=keyLabel_v3, 
                    useSub=stringr::str_to_sentence(keyLabel_v3), 
                    returnData=TRUE
                    )

## 
## Accuracy of predictions based on training data applied to holdout dataset is: 89.06%

# Combine per-model variable importances into one table, sorted within each
# source model by importance, with share (pct), cumulative share (cspct),
# and rank (n) computed per model
dfImp <- list("month"=rfMonth, 
              "temperature_2m"=rfTemp2m, 
              "soil_temperature_100_to_255cm"=rfSoil255, 
              "doy"=rfDOYFct,
              "fct_year"=rfYear, 
              "fct_hour"=rfHour, 
              "todSeason"=rfTODS
              ) %>% 
    map_dfr(.f=function(x) x[["rfImp"]], .id="src") %>% 
    arrange(src, desc(imp)) %>% 
    mutate(pct=imp/sum(imp), cspct=cumsum(pct), n=row_number(), .by=src)
dfImp
## # A tibble: 227 × 6
##    src   metric                               imp    pct cspct     n
##    <chr> <chr>                              <dbl>  <dbl> <dbl> <int>
##  1 doy   pct_soil_temperature_100_to_255cm 15156. 0.198  0.198     1
##  2 doy   pct_soil_temperature_28_to_100cm  10393. 0.136  0.333     2
##  3 doy   pct_soil_moisture_100_to_255cm     7681. 0.100  0.434     3
##  4 doy   pct_soil_moisture_28_to_100cm      6656. 0.0869 0.521     4
##  5 doy   pct_soil_temperature_7_to_28cm     4736. 0.0618 0.582     5
##  6 doy   pct_soil_moisture_7_to_28cm        4630. 0.0604 0.643     6
##  7 doy   pct_soil_moisture_0_to_7cm         3307. 0.0432 0.686     7
##  8 doy   pct_pressure_msl                   2171. 0.0283 0.714     8
##  9 doy   pct_surface_pressure               2165. 0.0283 0.743     9
## 10 doy   pct_dewpoint_2m                    1987. 0.0259 0.768    10
## # ℹ 217 more rows

Variable todSeason is predicted using only the four top importance variables:

# Refit the todSeason RF using only the 4 highest-importance predictors from
# dfImp; prints importances (rfImp) and holdout accuracy (rfAcc)
runPartialImportanceRF(dfTrain=dfTrain_v3, 
                       yVar="todSeason", 
                       dfTest=dfTest_v3, 
                       isContVar=FALSE,
                       impDB=dfImp,
                       nImp=4, 
                       makePlots=TRUE
                       )

## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 44.086%

## $rfImp
## # A tibble: 4 × 2
##   metric                               imp
##   <chr>                              <dbl>
## 1 pct_soil_temperature_100_to_255cm 16650.
## 2 pct_soil_temperature_28_to_100cm  12700.
## 3 pct_soil_temperature_7_to_28cm    13211.
## 4 pct_soil_moisture_100_to_255cm     7694.
## 
## $rfAcc
## [1] 0.4408602

Prediction accuracy is decreased as predictors are limited to 4, with day/night especially impacted due to lack of a radiation variable. A series of models are run, using a variable number of predictors:

Variable todSeason is predicted using a range of predictors:

apiTODS <- autoPartialImportance(dfTrain=dfTrain_v3, dfTest=dfTest_v3, yVar="todSeason", isContVar=FALSE)
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 31.043%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 46.985%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 47.639%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 43.899%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 82.375%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 84.198%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 86.162%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 88.499%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 87.471%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 87.845%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 88.967%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 89.014%
## 
## Accuracy of predictions based on pre-2022 training data applied to 2022 holdout dataset is: 88.873%
## # A tibble: 13 × 2
##     nImp rfAcc
##    <dbl> <dbl>
##  1     1 0.310
##  2     2 0.470
##  3     3 0.476
##  4     4 0.439
##  5     5 0.824
##  6     6 0.842
##  7     7 0.862
##  8     8 0.885
##  9     9 0.875
## 10    10 0.878
## 11    16 0.890
## 12    25 0.890
## 13    32 0.889

Prediction accuracy soars when a soil temperature variable and a radiation variable are both included.

Each possible predictor is run on a stand-alone basis:

# Holdout accuracy of a single-predictor RF for each candidate predictor.
# vapply (rather than sapply) guarantees a named numeric vector even if
# varsTrain were empty or a call returned something unexpected.
tstOneVar <- vapply(varsTrain, FUN=function(x) {
    runFullRF(dfTrain=dfTrain_v3, 
              yVar="todSeason", 
              xVars=x,
              dfTest=dfTest_v3, 
              useLabel=keyLabel_v3, 
              useSub=stringr::str_to_sentence(keyLabel_v3), 
              makePlots=FALSE,
              returnData=TRUE
              )[["rfAcc"]]
    }, 
    FUN.VALUE=numeric(1)
)
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 37.868%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 17.672%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 27.583%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 35.624%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 18.28%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 19.355%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 13.417%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 13.417%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 13.511%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 19.308%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 16.269%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 20.757%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 16.082%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 30.201%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 27.489%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 28.237%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 31.744%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 16.316%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 17.625%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 12.529%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 13.978%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 15.708%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 29.032%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 28.612%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 39.645%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 34.923%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 30.108%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 31.136%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 28.284%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 24.638%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 23.142%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 32.726%
# Rank the stand-alone predictors by holdout accuracy
tstOneVar %>% 
    tibble::enframe(name="pred", value="rfAcc") %>% 
    arrange(desc(rfAcc)) %>%
    print(n=40)
## # A tibble: 32 × 2
##    pred                              rfAcc
##    <chr>                             <dbl>
##  1 pct_soil_temperature_0_to_7cm     0.396
##  2 pct_temperature_2m                0.379
##  3 pct_apparent_temperature          0.356
##  4 pct_soil_temperature_7_to_28cm    0.349
##  5 pct_soil_moisture_100_to_255cm    0.327
##  6 pct_diffuse_radiation             0.317
##  7 pct_soil_temperature_100_to_255cm 0.311
##  8 pct_shortwave_radiation           0.302
##  9 pct_soil_temperature_28_to_100cm  0.301
## 10 pct_et0_fao_evapotranspiration    0.290
## 11 pct_vapor_pressure_deficit        0.286
## 12 pct_soil_moisture_0_to_7cm        0.283
## 13 pct_direct_normal_irradiance      0.282
## 14 pct_dewpoint_2m                   0.276
## 15 pct_direct_radiation              0.275
## 16 pct_soil_moisture_7_to_28cm       0.246
## 17 pct_soil_moisture_28_to_100cm     0.231
## 18 pct_cloudcover_mid                0.208
## 19 pct_surface_pressure              0.194
## 20 pct_cloudcover                    0.193
## 21 pct_pressure_msl                  0.183
## 22 pct_relativehumidity_2m           0.177
## 23 pct_windspeed_100m                0.176
## 24 pct_windspeed_10m                 0.163
## 25 pct_cloudcover_low                0.163
## 26 pct_cloudcover_high               0.161
## 27 pct_windgusts_10m                 0.157
## 28 pct_winddirection_100m            0.140
## 29 pct_snowfall                      0.135
## 30 pct_precipitation                 0.134
## 31 pct_rain                          0.134
## 32 pct_winddirection_10m             0.125

Each possible predictor is run on a stand-alone basis, along with the best predictor:

# Best stand-alone predictor (by holdout accuracy)
bestOneVar <- (tstOneVar %>% sort(decreasing = TRUE) %>% names())[1]
bestOneVar
# Holdout accuracy of each remaining predictor paired with the best one.
# vapply (rather than sapply) guarantees a named numeric vector.
tstTwoVar <- vapply(setdiff(varsTrain, bestOneVar), FUN=function(x) {
    runFullRF(dfTrain=dfTrain_v3, 
              yVar="todSeason", 
              xVars=c(x, bestOneVar),
              dfTest=dfTest_v3, 
              useLabel=keyLabel_v3, 
              useSub=stringr::str_to_sentence(keyLabel_v3), 
              makePlots=FALSE,
              returnData=TRUE
              )[["rfAcc"]]
    }, 
    FUN.VALUE=numeric(1)
)
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 42.122%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 44.226%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 42.403%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 43.619%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 39.691%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 39.598%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 38.756%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 38.429%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 37.962%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 38.382%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 37.588%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 39.878%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 40.439%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 62.366%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 52.735%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 50.538%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 63.862%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 39.598%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 39.878%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 36.372%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 36.606%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 39.224%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 56.007%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 44.554%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 53.109%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 52.034%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 58.813%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 41%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 39.364%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 43.011%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 47.592%
# Rank the two-predictor combinations by holdout accuracy
tstTwoVar %>% 
    tibble::enframe(name="pred", value="rfAcc") %>% 
    arrange(desc(rfAcc)) %>%
    print(n=40)
## # A tibble: 31 × 2
##    pred                              rfAcc
##    <chr>                             <dbl>
##  1 pct_diffuse_radiation             0.639
##  2 pct_shortwave_radiation           0.624
##  3 pct_soil_temperature_100_to_255cm 0.588
##  4 pct_et0_fao_evapotranspiration    0.560
##  5 pct_soil_temperature_7_to_28cm    0.531
##  6 pct_direct_radiation              0.527
##  7 pct_soil_temperature_28_to_100cm  0.520
##  8 pct_direct_normal_irradiance      0.505
##  9 pct_soil_moisture_100_to_255cm    0.476
## 10 pct_vapor_pressure_deficit        0.446
## 11 pct_relativehumidity_2m           0.442
## 12 pct_apparent_temperature          0.436
## 13 pct_soil_moisture_28_to_100cm     0.430
## 14 pct_dewpoint_2m                   0.424
## 15 pct_temperature_2m                0.421
## 16 pct_soil_moisture_0_to_7cm        0.410
## 17 pct_cloudcover_high               0.404
## 18 pct_cloudcover_mid                0.399
## 19 pct_windspeed_100m                0.399
## 20 pct_pressure_msl                  0.397
## 21 pct_surface_pressure              0.396
## 22 pct_windspeed_10m                 0.396
## 23 pct_soil_moisture_7_to_28cm       0.394
## 24 pct_windgusts_10m                 0.392
## 25 pct_precipitation                 0.388
## 26 pct_rain                          0.384
## 27 pct_cloudcover                    0.384
## 28 pct_snowfall                      0.380
## 29 pct_cloudcover_low                0.376
## 30 pct_winddirection_100m            0.366
## 31 pct_winddirection_10m             0.364

Each possible predictor is run on a stand-alone basis, along with the best two predictors:

# Best second predictor (by holdout accuracy when paired with bestOneVar)
bestTwoVar <- (tstTwoVar %>% sort(decreasing = TRUE) %>% names())[1]
bestTwoVar
# Holdout accuracy of each remaining predictor added to the best two.
# vapply (rather than sapply) guarantees a named numeric vector.
tstThreeVar <- vapply(setdiff(varsTrain, c(bestOneVar, bestTwoVar)), FUN=function(x) {
    runFullRF(dfTrain=dfTrain_v3, 
              yVar="todSeason", 
              xVars=c(x, bestOneVar, bestTwoVar),
              dfTest=dfTest_v3, 
              useLabel=keyLabel_v3, 
              useSub=stringr::str_to_sentence(keyLabel_v3), 
              makePlots=FALSE,
              returnData=TRUE
              )[["rfAcc"]]
    }, 
    FUN.VALUE=numeric(1)
)
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 63.768%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 62.319%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 64.049%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 63.16%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 65.217%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 64.61%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 62.412%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 56.709%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 62.085%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 63.207%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 61.431%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 64.329%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 64.002%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 56.148%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 56.101%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 56.054%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 61.992%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 62.786%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 60.402%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 61.664%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 63.347%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 61.571%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 62.973%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 65.965%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 75.129%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 85.32%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 63.815%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 64.002%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 65.965%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 77.653%
# Rank the three-predictor combinations by holdout accuracy
tstThreeVar %>% 
    tibble::enframe(name="pred", value="rfAcc") %>% 
    arrange(desc(rfAcc)) %>%
    print(n=40)
## # A tibble: 30 × 2
##    pred                              rfAcc
##    <chr>                             <dbl>
##  1 pct_soil_temperature_100_to_255cm 0.853
##  2 pct_soil_moisture_100_to_255cm    0.777
##  3 pct_soil_temperature_28_to_100cm  0.751
##  4 pct_soil_temperature_7_to_28cm    0.660
##  5 pct_soil_moisture_28_to_100cm     0.660
##  6 pct_pressure_msl                  0.652
##  7 pct_surface_pressure              0.646
##  8 pct_cloudcover_mid                0.643
##  9 pct_dewpoint_2m                   0.640
## 10 pct_cloudcover_high               0.640
## 11 pct_soil_moisture_7_to_28cm       0.640
## 12 pct_soil_moisture_0_to_7cm        0.638
## 13 pct_temperature_2m                0.638
## 14 pct_windgusts_10m                 0.633
## 15 pct_cloudcover                    0.632
## 16 pct_apparent_temperature          0.632
## 17 pct_vapor_pressure_deficit        0.630
## 18 pct_windspeed_100m                0.628
## 19 pct_precipitation                 0.624
## 20 pct_relativehumidity_2m           0.623
## 21 pct_snowfall                      0.621
## 22 pct_windspeed_10m                 0.620
## 23 pct_winddirection_100m            0.617
## 24 pct_et0_fao_evapotranspiration    0.616
## 25 pct_cloudcover_low                0.614
## 26 pct_winddirection_10m             0.604
## 27 pct_rain                          0.567
## 28 pct_shortwave_radiation           0.561
## 29 pct_direct_radiation              0.561
## 30 pct_direct_normal_irradiance      0.561

Each possible predictor is run on a stand-alone basis, along with the best three predictors:

# Best third predictor (by holdout accuracy when added to the best two)
bestThreeVar <- (tstThreeVar %>% sort(decreasing = TRUE) %>% names())[1]
bestThreeVar
# Holdout accuracy of each remaining predictor added to the best three.
# vapply (rather than sapply) guarantees a named numeric vector.
tstFourVar <- vapply(setdiff(varsTrain, c(bestOneVar, bestTwoVar, bestThreeVar)), FUN=function(x) {
    runFullRF(dfTrain=dfTrain_v3, 
              yVar="todSeason", 
              xVars=c(x, bestOneVar, bestTwoVar, bestThreeVar),
              dfTest=dfTest_v3, 
              useLabel=keyLabel_v3, 
              useSub=stringr::str_to_sentence(keyLabel_v3), 
              makePlots=FALSE,
              returnData=TRUE
              )[["rfAcc"]]
    }, 
    FUN.VALUE=numeric(1)
)
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 84.712%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 85.227%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 84.572%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 85.04%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 84.338%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 84.105%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 84.479%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 84.666%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 85.04%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 85.554%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 84.479%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 84.666%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 85.881%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 85.928%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 85.788%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 85.554%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 83.871%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 84.105%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 83.918%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 83.918%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 84.525%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 85.647%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 85.367%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 86.863%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 86.255%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 85.507%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 86.068%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 85.273%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 77.84%
# Display the candidate fourth predictors ranked by holdout accuracy
tibble::enframe(tstFourVar, name = "pred", value = "rfAcc") %>%
    arrange(desc(rfAcc)) %>%
    print(n = 40)
## # A tibble: 29 × 2
##    pred                             rfAcc
##    <chr>                            <dbl>
##  1 pct_soil_temperature_7_to_28cm   0.869
##  2 pct_soil_temperature_28_to_100cm 0.863
##  3 pct_soil_moisture_7_to_28cm      0.861
##  4 pct_shortwave_radiation          0.859
##  5 pct_cloudcover_high              0.859
##  6 pct_direct_radiation             0.858
##  7 pct_et0_fao_evapotranspiration   0.856
##  8 pct_cloudcover                   0.856
##  9 pct_direct_normal_irradiance     0.856
## 10 pct_soil_moisture_0_to_7cm       0.855
## 11 pct_vapor_pressure_deficit       0.854
## 12 pct_soil_moisture_28_to_100cm    0.853
## 13 pct_relativehumidity_2m          0.852
## 14 pct_apparent_temperature         0.850
## 15 pct_snowfall                     0.850
## 16 pct_temperature_2m               0.847
## 17 pct_rain                         0.847
## 18 pct_cloudcover_mid               0.847
## 19 pct_dewpoint_2m                  0.846
## 20 pct_windgusts_10m                0.845
## 21 pct_precipitation                0.845
## 22 pct_cloudcover_low               0.845
## 23 pct_pressure_msl                 0.843
## 24 pct_surface_pressure             0.841
## 25 pct_windspeed_100m               0.841
## 26 pct_winddirection_10m            0.839
## 27 pct_winddirection_100m           0.839
## 28 pct_windspeed_10m                0.839
## 29 pct_soil_moisture_100_to_255cm   0.778

The evolution in prediction accuracy is plotted:

# Plot holdout accuracy as predictors are added one at a time: anchor at zero
# predictors (accuracy 0), the best 1-4 predictor runs, then the all-predictor
# result taken from apiTODS at its maximum nImp.
tibble::tibble(x=0:4, 
               y=c(0, sapply(list(tstOneVar, tstTwoVar, tstThreeVar, tstFourVar), FUN=function(x) max(x)))
               ) %>%
    bind_rows(apiTODS %>% filter(nImp==max(nImp)) %>% select(x=nImp, y=rfAcc)) %>%
    ggplot(aes(x=x, y=y)) + 
    geom_point() + 
    geom_line() + 
    # Label placement: the final (all-predictor) point is nudged down and to
    # the right; earlier points are nudged left. NOTE: the original wrote
    # `y=y-0.05` inside ifelse(), which only worked via partial matching of
    # `y` to ifelse's `yes` argument; positional arguments are used instead.
    geom_text(aes(label=round(y, 2), y=ifelse(x>=4, y-0.05, y), x=ifelse(x>=4, x+1, x-0.5)), 
              hjust=1, 
              size=3
              ) +
    # Dashed reference line at the best accuracy achieved
    geom_hline(aes(yintercept=max(y)), lty=2) +
    lims(y=c(0, 1)) + 
    labs(x="# Predictors", 
         y="Accuracy on holdout data", 
         title="Accuracy of predicting day-night season by number of predictors"
         )

The first three predictors each significantly improve accuracy. A soil temperature variable helps predict season, then a radiation variable helps predict day-night, then a second soil temperature variable significantly refines prediction of season. There is very little gain for 4+ predictors.

The process is converted to functional form:

runNextBestPredictor <- function(varsRun, 
                                 xFix, 
                                 yVar, 
                                 isContVar,
                                 dfTrain,
                                 dfTest=dfTrain, 
                                 useLabel="predictions based on training data applied to holdout dataset",
                                 useSub=stringr::str_to_sentence(keyLabel_v3), 
                                 makePlots=FALSE
                                 ) {
    
    # Score every candidate next-best predictor by fitting a random forest on
    # the fixed predictors plus that one candidate, printing a ranked summary,
    # and returning the named vector of scores.
    #
    # FUNCTION ARGUMENTS:
    # varsRun: variables to be run as potential next-best predictors
    # xFix: variables already included in every candidate fit
    # yVar: dependent variable of interest
    # isContVar: boolean, is yVar continuous? (if so, r-squared is reported)
    # dfTrain: training data
    # dfTest: test data (defaults to the training data)
    # useLabel: descriptive label for printed accuracy messages
    # useSub: subtitle description
    #   NOTE(review): the default references the global keyLabel_v3 — the
    #   function will error if called without useSub when that global is absent
    # makePlots: boolean, should plots be created for each predictor run?
    
    vecScores <- sapply(varsRun, FUN=function(candidate) {
        # One fit per candidate: xFix plus the candidate under test
        fitAcc <- runFullRF(dfTrain=dfTrain, 
                            yVar=yVar, 
                            xVars=c(xFix, candidate),
                            dfTest=dfTest, 
                            useLabel=useLabel, 
                            useSub=useSub,
                            isContVar=isContVar,
                            makePlots=makePlots,
                            returnData=TRUE
                            )[["rfAcc"]]
        # Continuous targets report r-squared; categorical targets report accuracy
        if(isTRUE(isContVar)) fitAcc[["r2"]] else fitAcc
    }
    )
    
    # Print the candidates ranked best-first for inspection
    tibble::enframe(vecScores, name="pred", value="rfAcc") %>%
        arrange(desc(rfAcc)) %>%
        print(n=40)
    
    vecScores

}

The function is tested for a categorical variable:

# Validate the function on the categorical day-night season target, reusing
# the three-variable candidate ordering computed earlier.
rnbpThree <- runNextBestPredictor(
    varsRun=names(sort(tstThreeVar, decreasing=TRUE)), 
    xFix=c(bestOneVar, bestTwoVar), 
    yVar="todSeason", 
    isContVar=FALSE, 
    dfTrain=dfTrain_v3, 
    dfTest=dfTest_v3
)
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 85.273%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 77.84%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 75.316%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 65.685%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 65.778%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 65.124%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 64.843%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 64.236%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 63.955%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 64.002%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 63.207%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 64.376%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 63.581%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 62.786%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 63.534%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 63.067%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 62.366%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 62.973%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 60.776%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 62.506%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 60.449%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 62.319%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 61.337%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 61.805%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 60.823%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 60.168%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 57.363%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 55.961%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 56.101%
## 
## Accuracy of predictions based on training data applied to holdout dataset is: 56.241%
## # A tibble: 30 × 2
##    pred                              rfAcc
##    <chr>                             <dbl>
##  1 pct_soil_temperature_100_to_255cm 0.853
##  2 pct_soil_moisture_100_to_255cm    0.778
##  3 pct_soil_temperature_28_to_100cm  0.753
##  4 pct_soil_moisture_28_to_100cm     0.658
##  5 pct_soil_temperature_7_to_28cm    0.657
##  6 pct_pressure_msl                  0.651
##  7 pct_surface_pressure              0.648
##  8 pct_soil_moisture_0_to_7cm        0.644
##  9 pct_cloudcover_mid                0.642
## 10 pct_cloudcover_high               0.640
## 11 pct_dewpoint_2m                   0.640
## 12 pct_temperature_2m                0.636
## 13 pct_cloudcover                    0.635
## 14 pct_soil_moisture_7_to_28cm       0.632
## 15 pct_apparent_temperature          0.631
## 16 pct_windspeed_100m                0.630
## 17 pct_windgusts_10m                 0.628
## 18 pct_relativehumidity_2m           0.625
## 19 pct_vapor_pressure_deficit        0.624
## 20 pct_windspeed_10m                 0.623
## 21 pct_et0_fao_evapotranspiration    0.618
## 22 pct_winddirection_100m            0.613
## 23 pct_cloudcover_low                0.608
## 24 pct_precipitation                 0.608
## 25 pct_snowfall                      0.604
## 26 pct_winddirection_10m             0.602
## 27 pct_rain                          0.574
## 28 pct_direct_normal_irradiance      0.562
## 29 pct_direct_radiation              0.561
## 30 pct_shortwave_radiation           0.560

Results are compared to the previous process; only slight differences, likely due to different random states, are observed:

# Compare the new function's third-predictor accuracies (rnbpThree) against
# the earlier manual run (tstThreeVar); remaining differences reflect
# random-forest seed variation between the two runs.
rownames_to_column(as.data.frame(rnbpThree), "predictor") %>% 
    full_join(rownames_to_column(as.data.frame(tstThreeVar), "predictor"), by="predictor") %>% 
    tibble::tibble() %>% 
    # Long format: one row per (predictor, run) so both runs plot together
    pivot_longer(cols=-c(predictor)) %>% 
    ggplot(aes(x=fct_reorder(predictor, value), y=value)) + 
    # Map run identifier to a readable legend label
    geom_point(aes(color=c("rnbpThree"="New\nFunction", "tstThreeVar"="Previous\nResult")[name])) + 
    coord_flip() + 
    # Dashed baseline: best accuracy achievable with only two predictors
    geom_hline(lty=2, yintercept=max(tstTwoVar)) +
    labs(y="Accuracy", 
         x=NULL, 
         title="Accuracy for each predictor added standalone as next-best third predictor",
         subtitle="Dashed line is baseline two-predictor accuracy"
         ) + 
    scale_color_discrete(NULL)

The function is tested for a continuous variable:

# Validate the function on a continuous target: predict 2m temperature from
# all non-temperature predictors plus the calendar/time-of-day fields.
rnbpContOne <- runNextBestPredictor(
    varsRun=c(str_subset(varsTrain, "pct_temp|apparent", negate=TRUE), 
              "month", 
              "tod", 
              "doy"
              ), 
    xFix=c(), 
    yVar="temperature_2m", 
    isContVar=TRUE, 
    dfTrain=dfTrain, 
    dfTest=dfTest
)
## 
## R-squared of predictions based on training data applied to holdout dataset is: -0.449% (RMSE 10.33 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 83.692% (RMSE 4.16 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 20.318% (RMSE 9.2 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 19.151% (RMSE 9.26 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 0.115% (RMSE 10.3 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 0.379% (RMSE 10.28 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 2.59% (RMSE 10.17 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 8.542% (RMSE 9.85 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 3.757% (RMSE 10.11 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 14.692% (RMSE 9.52 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 4.673% (RMSE 10.06 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 14.187% (RMSE 9.54 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 12.574% (RMSE 9.63 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 10.204% (RMSE 9.76 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 13.622% (RMSE 9.58 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 4.044% (RMSE 10.09 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 7.256% (RMSE 9.92 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: -11.051% (RMSE 10.86 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: -8.288% (RMSE 10.72 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 3.247% (RMSE 10.13 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 27.493% (RMSE 8.77 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 52.87% (RMSE 7.07 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 96.135% (RMSE 2.03 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 86.431% (RMSE 3.8 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 71.872% (RMSE 5.46 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 22.386% (RMSE 9.08 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 59.421% (RMSE 6.56 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 61.956% (RMSE 6.35 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 51.89% (RMSE 7.15 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 18.824% (RMSE 9.28 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 76.61% (RMSE 4.98 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 1.729% (RMSE 10.21 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 78.077% (RMSE 4.82 vs. 10.3 null)
## # A tibble: 33 × 2
##    pred                                 rfAcc
##    <chr>                                <dbl>
##  1 pct_soil_temperature_0_to_7cm      0.961  
##  2 pct_soil_temperature_7_to_28cm     0.864  
##  3 pct_dewpoint_2m                    0.837  
##  4 doy                                0.781  
##  5 month                              0.766  
##  6 pct_soil_temperature_28_to_100cm   0.719  
##  7 pct_soil_moisture_7_to_28cm        0.620  
##  8 pct_soil_moisture_0_to_7cm         0.594  
##  9 pct_vapor_pressure_deficit         0.529  
## 10 pct_soil_moisture_28_to_100cm      0.519  
## 11 pct_et0_fao_evapotranspiration     0.275  
## 12 pct_soil_temperature_100_to_255cm  0.224  
## 13 pct_pressure_msl                   0.203  
## 14 pct_surface_pressure               0.192  
## 15 pct_soil_moisture_100_to_255cm     0.188  
## 16 pct_cloudcover_mid                 0.147  
## 17 pct_shortwave_radiation            0.142  
## 18 pct_diffuse_radiation              0.136  
## 19 pct_direct_radiation               0.126  
## 20 pct_direct_normal_irradiance       0.102  
## 21 pct_cloudcover                     0.0854 
## 22 pct_windspeed_100m                 0.0726 
## 23 pct_cloudcover_high                0.0467 
## 24 pct_windspeed_10m                  0.0404 
## 25 pct_cloudcover_low                 0.0376 
## 26 pct_windgusts_10m                  0.0325 
## 27 pct_snowfall                       0.0259 
## 28 tod                                0.0173 
## 29 pct_rain                           0.00379
## 30 pct_precipitation                  0.00115
## 31 pct_relativehumidity_2m           -0.00449
## 32 pct_winddirection_100m            -0.0829 
## 33 pct_winddirection_10m             -0.111

The best predictor for temperature is extracted, with a function written for reuse:

getNextBestVar <- function(x, returnTbl=FALSE, n=if(isTRUE(returnTbl)) +Inf else 1) {
    
    # Rank a named score vector best-first and return either the top-n
    # predictor names or the full ranked tibble.
    #
    # FUNCTION ARGUMENTS:
    # x: named vector of accuracy or r-squared values
    # returnTbl: boolean; if TRUE return a ranked tibble, if FALSE return a
    #   character vector of the top-n predictor names
    # n: number of predictors to return (+Inf returns the full tibble or vector)
    
    ranked <- vecToTibble(x, colNameName="pred") %>%
        arrange(desc(value)) %>%
        slice_head(n=n)
    
    # Early exit with just the names unless the tibble was requested
    if(!isTRUE(returnTbl)) return(ranked %>% pull(pred))
    ranked
    
}

getNextBestVar(rnbpContOne)
## [1] "pct_soil_temperature_0_to_7cm"
getNextBestVar(rnbpContOne, n=4)
## [1] "pct_soil_temperature_0_to_7cm"  "pct_soil_temperature_7_to_28cm"
## [3] "pct_dewpoint_2m"                "doy"
getNextBestVar(rnbpContOne, returnTbl=TRUE)
## # A tibble: 33 × 2
##    pred                             value
##    <chr>                            <dbl>
##  1 pct_soil_temperature_0_to_7cm    0.961
##  2 pct_soil_temperature_7_to_28cm   0.864
##  3 pct_dewpoint_2m                  0.837
##  4 doy                              0.781
##  5 month                            0.766
##  6 pct_soil_temperature_28_to_100cm 0.719
##  7 pct_soil_moisture_7_to_28cm      0.620
##  8 pct_soil_moisture_0_to_7cm       0.594
##  9 pct_vapor_pressure_deficit       0.529
## 10 pct_soil_moisture_28_to_100cm    0.519
## # ℹ 23 more rows

The functions are run iteratively to select the first three predictors of the continuous variable temperature:

# Greedy forward selection: three passes, each adding the single best
# remaining predictor of 2m temperature given the predictors chosen so far.
yVar <- "temperature_2m"
yCont <- TRUE
varsTemp <- c(varsTrain[!str_detect(varsTrain, "pct_temp|apparent")], "month", "tod", "doy")
varsRun <- 3
rnbpTempList <- vector("list", varsRun)

for(intCtr in seq_len(varsRun)) {
    # Predictors locked in by earlier passes (none on the first pass)
    if(intCtr == 1) {
        xFix <- character(0)
    } else {
        xFix <- vapply(rnbpTempList[seq_len(intCtr - 1)], 
                       FUN=function(tbl) tbl$pred[1], 
                       FUN.VALUE=character(1)
                       )
    }
    # Score the remaining candidates and keep the full ranked tibble
    rnbpTempList[[intCtr]] <- runNextBestPredictor(varsRun=setdiff(varsTemp, xFix), 
                                                   xFix=xFix, 
                                                   yVar=yVar, 
                                                   isContVar=yCont, 
                                                   dfTrain=dfTrain, 
                                                   dfTest=dfTest
                                                   ) %>% 
        getNextBestVar(returnTbl = TRUE)
}
## 
## R-squared of predictions based on training data applied to holdout dataset is: -0.449% (RMSE 10.33 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 83.691% (RMSE 4.16 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 20.312% (RMSE 9.2 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 19.153% (RMSE 9.26 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 0.114% (RMSE 10.3 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 0.387% (RMSE 10.28 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 2.591% (RMSE 10.17 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 8.543% (RMSE 9.85 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 3.754% (RMSE 10.11 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 14.692% (RMSE 9.52 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 4.674% (RMSE 10.06 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 14.187% (RMSE 9.54 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 12.577% (RMSE 9.63 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 10.195% (RMSE 9.76 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 13.626% (RMSE 9.58 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 4.043% (RMSE 10.09 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 7.263% (RMSE 9.92 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: -11.047% (RMSE 10.86 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: -8.316% (RMSE 10.72 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 3.243% (RMSE 10.13 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 27.494% (RMSE 8.77 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 52.873% (RMSE 7.07 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 96.134% (RMSE 2.03 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 86.428% (RMSE 3.8 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 71.877% (RMSE 5.46 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 22.374% (RMSE 9.08 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 59.428% (RMSE 6.56 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 61.959% (RMSE 6.35 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 51.879% (RMSE 7.15 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 18.784% (RMSE 9.29 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 76.613% (RMSE 4.98 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 1.729% (RMSE 10.21 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 78.067% (RMSE 4.83 vs. 10.3 null)
## # A tibble: 33 × 2
##    pred                                 rfAcc
##    <chr>                                <dbl>
##  1 pct_soil_temperature_0_to_7cm      0.961  
##  2 pct_soil_temperature_7_to_28cm     0.864  
##  3 pct_dewpoint_2m                    0.837  
##  4 doy                                0.781  
##  5 month                              0.766  
##  6 pct_soil_temperature_28_to_100cm   0.719  
##  7 pct_soil_moisture_7_to_28cm        0.620  
##  8 pct_soil_moisture_0_to_7cm         0.594  
##  9 pct_vapor_pressure_deficit         0.529  
## 10 pct_soil_moisture_28_to_100cm      0.519  
## 11 pct_et0_fao_evapotranspiration     0.275  
## 12 pct_soil_temperature_100_to_255cm  0.224  
## 13 pct_pressure_msl                   0.203  
## 14 pct_surface_pressure               0.192  
## 15 pct_soil_moisture_100_to_255cm     0.188  
## 16 pct_cloudcover_mid                 0.147  
## 17 pct_shortwave_radiation            0.142  
## 18 pct_diffuse_radiation              0.136  
## 19 pct_direct_radiation               0.126  
## 20 pct_direct_normal_irradiance       0.102  
## 21 pct_cloudcover                     0.0854 
## 22 pct_windspeed_100m                 0.0726 
## 23 pct_cloudcover_high                0.0467 
## 24 pct_windspeed_10m                  0.0404 
## 25 pct_cloudcover_low                 0.0375 
## 26 pct_windgusts_10m                  0.0324 
## 27 pct_snowfall                       0.0259 
## 28 tod                                0.0173 
## 29 pct_rain                           0.00387
## 30 pct_precipitation                  0.00114
## 31 pct_relativehumidity_2m           -0.00449
## 32 pct_winddirection_100m            -0.0832 
## 33 pct_winddirection_10m             -0.110  
## 
## R-squared of predictions based on training data applied to holdout dataset is: 96.57% (RMSE 1.91 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 98.079% (RMSE 1.43 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 96.095% (RMSE 2.04 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 96.094% (RMSE 2.04 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 85.409% (RMSE 3.94 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 86.161% (RMSE 3.83 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 81.562% (RMSE 4.42 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 95.058% (RMSE 2.29 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 91.435% (RMSE 3.02 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 91.673% (RMSE 2.97 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 90.655% (RMSE 3.15 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 94.179% (RMSE 2.49 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 93.814% (RMSE 2.56 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 93.938% (RMSE 2.54 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 94.506% (RMSE 2.42 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 96.161% (RMSE 2.02 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 96.234% (RMSE 2 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 95.451% (RMSE 2.2 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 95.536% (RMSE 2.18 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 96.256% (RMSE 1.99 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 96.162% (RMSE 2.02 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 96.411% (RMSE 1.95 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 96.436% (RMSE 1.95 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 96.265% (RMSE 1.99 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 95.896% (RMSE 2.09 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 96.13% (RMSE 2.03 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 96.017% (RMSE 2.06 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 95.934% (RMSE 2.08 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 95.866% (RMSE 2.09 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 95.679% (RMSE 2.14 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 84.38% (RMSE 4.07 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 96.09% (RMSE 2.04 vs. 10.3 null)
## # A tibble: 32 × 2
##    pred                              rfAcc
##    <chr>                             <dbl>
##  1 pct_dewpoint_2m                   0.981
##  2 pct_relativehumidity_2m           0.966
##  3 pct_soil_temperature_7_to_28cm    0.964
##  4 pct_vapor_pressure_deficit        0.964
##  5 pct_soil_temperature_28_to_100cm  0.963
##  6 pct_windgusts_10m                 0.963
##  7 pct_windspeed_100m                0.962
##  8 pct_et0_fao_evapotranspiration    0.962
##  9 pct_windspeed_10m                 0.962
## 10 pct_soil_moisture_0_to_7cm        0.961
## 11 pct_pressure_msl                  0.961
## 12 pct_surface_pressure              0.961
## 13 doy                               0.961
## 14 pct_soil_moisture_7_to_28cm       0.960
## 15 pct_soil_moisture_28_to_100cm     0.959
## 16 pct_soil_temperature_100_to_255cm 0.959
## 17 pct_soil_moisture_100_to_255cm    0.959
## 18 month                             0.957
## 19 pct_winddirection_100m            0.955
## 20 pct_winddirection_10m             0.955
## 21 pct_cloudcover                    0.951
## 22 pct_diffuse_radiation             0.945
## 23 pct_shortwave_radiation           0.942
## 24 pct_direct_normal_irradiance      0.939
## 25 pct_direct_radiation              0.938
## 26 pct_cloudcover_mid                0.917
## 27 pct_cloudcover_low                0.914
## 28 pct_cloudcover_high               0.907
## 29 pct_rain                          0.862
## 30 pct_precipitation                 0.854
## 31 tod                               0.844
## 32 pct_snowfall                      0.816
## 
## R-squared of predictions based on training data applied to holdout dataset is: 99.82% (RMSE 0.44 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 98.076% (RMSE 1.43 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 98.072% (RMSE 1.43 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 95.025% (RMSE 2.3 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 94.949% (RMSE 2.32 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 91.891% (RMSE 2.93 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 97.889% (RMSE 1.5 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 96.293% (RMSE 1.98 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 97.054% (RMSE 1.77 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 96.47% (RMSE 1.94 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 98.076% (RMSE 1.43 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 98.054% (RMSE 1.44 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 97.907% (RMSE 1.49 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 98.102% (RMSE 1.42 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 98.082% (RMSE 1.43 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 98.08% (RMSE 1.43 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 97.622% (RMSE 1.59 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 97.629% (RMSE 1.59 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 98.075% (RMSE 1.43 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 98.575% (RMSE 1.23 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 99.874% (RMSE 0.37 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 98.264% (RMSE 1.36 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 98.268% (RMSE 1.36 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 98.055% (RMSE 1.44 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 98.094% (RMSE 1.42 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 98.101% (RMSE 1.42 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 97.922% (RMSE 1.49 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 97.957% (RMSE 1.47 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 98.022% (RMSE 1.45 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 93.295% (RMSE 2.67 vs. 10.3 null)
## 
## R-squared of predictions based on training data applied to holdout dataset is: 98.113% (RMSE 1.42 vs. 10.3 null)
## # A tibble: 31 × 2
##    pred                              rfAcc
##    <chr>                             <dbl>
##  1 pct_vapor_pressure_deficit        0.999
##  2 pct_relativehumidity_2m           0.998
##  3 pct_et0_fao_evapotranspiration    0.986
##  4 pct_soil_temperature_28_to_100cm  0.983
##  5 pct_soil_temperature_7_to_28cm    0.983
##  6 doy                               0.981
##  7 pct_diffuse_radiation             0.981
##  8 pct_soil_moisture_7_to_28cm       0.981
##  9 pct_soil_moisture_0_to_7cm        0.981
## 10 pct_windspeed_10m                 0.981
## 11 pct_windspeed_100m                0.981
## 12 pct_shortwave_radiation           0.981
## 13 pct_pressure_msl                  0.981
## 14 pct_windgusts_10m                 0.981
## 15 pct_surface_pressure              0.981
## 16 pct_soil_temperature_100_to_255cm 0.981
## 17 pct_direct_radiation              0.981
## 18 month                             0.980
## 19 pct_soil_moisture_100_to_255cm    0.980
## 20 pct_soil_moisture_28_to_100cm     0.979
## 21 pct_direct_normal_irradiance      0.979
## 22 pct_cloudcover                    0.979
## 23 pct_winddirection_100m            0.976
## 24 pct_winddirection_10m             0.976
## 25 pct_cloudcover_mid                0.971
## 26 pct_cloudcover_high               0.965
## 27 pct_cloudcover_low                0.963
## 28 pct_precipitation                 0.950
## 29 pct_rain                          0.949
## 30 tod                               0.933
## 31 pct_snowfall                      0.919
# Print the accumulated list of predictor-ranking tibbles (pred/value pairs),
# one element per modeling pass. NOTE(review): judging from the printed output
# below, element [[1]] appears to hold correlation-style rankings and [[2]]/[[3]]
# random-forest accuracy rankings — confirm against where rnbpTempList is built
# earlier in the file.
rnbpTempList
## [[1]]
## # A tibble: 33 × 2
##    pred                             value
##    <chr>                            <dbl>
##  1 pct_soil_temperature_0_to_7cm    0.961
##  2 pct_soil_temperature_7_to_28cm   0.864
##  3 pct_dewpoint_2m                  0.837
##  4 doy                              0.781
##  5 month                            0.766
##  6 pct_soil_temperature_28_to_100cm 0.719
##  7 pct_soil_moisture_7_to_28cm      0.620
##  8 pct_soil_moisture_0_to_7cm       0.594
##  9 pct_vapor_pressure_deficit       0.529
## 10 pct_soil_moisture_28_to_100cm    0.519
## # ℹ 23 more rows
## 
## [[2]]
## # A tibble: 32 × 2
##    pred                             value
##    <chr>                            <dbl>
##  1 pct_dewpoint_2m                  0.981
##  2 pct_relativehumidity_2m          0.966
##  3 pct_soil_temperature_7_to_28cm   0.964
##  4 pct_vapor_pressure_deficit       0.964
##  5 pct_soil_temperature_28_to_100cm 0.963
##  6 pct_windgusts_10m                0.963
##  7 pct_windspeed_100m               0.962
##  8 pct_et0_fao_evapotranspiration   0.962
##  9 pct_windspeed_10m                0.962
## 10 pct_soil_moisture_0_to_7cm       0.961
## # ℹ 22 more rows
## 
## [[3]]
## # A tibble: 31 × 2
##    pred                             value
##    <chr>                            <dbl>
##  1 pct_vapor_pressure_deficit       0.999
##  2 pct_relativehumidity_2m          0.998
##  3 pct_et0_fao_evapotranspiration   0.986
##  4 pct_soil_temperature_28_to_100cm 0.983
##  5 pct_soil_temperature_7_to_28cm   0.983
##  6 doy                              0.981
##  7 pct_diffuse_radiation            0.981
##  8 pct_soil_moisture_7_to_28cm      0.981
##  9 pct_soil_moisture_0_to_7cm       0.981
## 10 pct_windspeed_10m                0.981
## # ℹ 21 more rows